diff --git a/OWNERS b/OWNERS index ef4320ae..425ca3f8 100644 --- a/OWNERS +++ b/OWNERS @@ -8,4 +8,3 @@ reviewers: - ksimon1 - lyarwood - jcanocan - - opokornyy diff --git a/go.mod b/go.mod index 3123ea07..43fc3a82 100644 --- a/go.mod +++ b/go.mod @@ -4,17 +4,17 @@ go 1.22.4 require ( github.com/emicklei/go-restful/v3 v3.12.1 - github.com/fsnotify/fsnotify v1.7.0 - github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/fsnotify/fsnotify v1.8.0 + github.com/golang-jwt/jwt/v4 v4.5.1 github.com/golang/mock v1.6.0 - github.com/onsi/ginkgo/v2 v2.19.1 - github.com/onsi/gomega v1.34.1 + github.com/onsi/ginkgo/v2 v2.21.0 + github.com/onsi/gomega v1.35.1 k8s.io/api v0.30.3 - k8s.io/apimachinery v0.30.3 + k8s.io/apimachinery v0.31.2 k8s.io/client-go v0.30.3 - k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 - kubevirt.io/api v1.3.0 - kubevirt.io/client-go v1.3.0 + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 + kubevirt.io/api v1.3.1 + kubevirt.io/client-go v1.3.1 kubevirt.io/vm-console-proxy/api v0.0.0 sigs.k8s.io/yaml v1.4.0 ) @@ -22,6 +22,7 @@ require ( require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-kit/kit v0.13.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect @@ -31,18 +32,18 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.2.2 // indirect + github.com/golang/glog v1.2.3 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect + github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.1 // indirect + github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.5 // indirect github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/moby/spdystream v0.5.0 // indirect @@ -54,24 +55,24 @@ require ( github.com/openshift/client-go v0.0.0-20240528061634-b054aa794d87 // indirect github.com/openshift/custom-resource-status v1.1.2 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 // indirect + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/oauth2 v0.22.0 // indirect - golang.org/x/sys v0.23.0 // 
indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/time v0.6.0 // indirect - golang.org/x/tools v0.24.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + github.com/x448/float16 v0.8.4 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/term v0.26.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect + golang.org/x/tools v0.27.0 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.30.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.30.1 // indirect - kubevirt.io/containerized-data-importer-api v1.59.0 // indirect + kubevirt.io/containerized-data-importer-api v1.60.3 // indirect kubevirt.io/controller-lifecycle-operator-sdk/api v0.2.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect diff --git a/go.sum b/go.sum index a275f86a..57254026 100644 --- a/go.sum +++ b/go.sum @@ -63,8 +63,10 @@ github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lSh github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= @@ -111,11 +113,11 @@ github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/K github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= 
-github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.3 h1:oDTdz9f5VGVVNGu/Q7UXKWYsD0873HXLHdJUNBsSEKM= +github.com/golang/glog v1.2.3/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -162,8 +164,9 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -194,8 +197,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.1 h1:n4FpoJ6aGDx8ULfya/C4ycrMDuPZlf7AtPyrT4+rIP4= -github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.1/go.mod h1:CM7HAH5PNuIsqjMN0fGc1ydM74Uj+0VZFhob620nklw= +github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.5 h1:CELpSMPSyicFBaVsxROmfrWlu9yr3Dduk+y7vGrIsx8= +github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.5/go.mod h1:CM7HAH5PNuIsqjMN0fGc1ydM74Uj+0VZFhob620nklw= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -259,8 +262,8 @@ github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7 github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= 
-github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0= -github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= @@ -283,8 +286,8 @@ github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3ev github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= -github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= -github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/openshift/api v0.0.0-20240717221938-8da8de571496 h1:W04T0qag8pqW3xPW4L8Ximmenic068wlmcwpHJgLk5U= github.com/openshift/api v0.0.0-20240717221938-8da8de571496/go.mod h1:OOh6Qopf21pSzqNVCB5gomomBXb8o5sGKZxG2KNpaXM= github.com/openshift/client-go v0.0.0-20240528061634-b054aa794d87 h1:JtLhaGpSEconE+1IKmIgCOof/Len5ceG6H1pk43yv5U= @@ -298,12 +301,12 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 h1:6UsAv+jAevuGO2yZFU/BukV4o9NKnFMOuoouSA4G0ns= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2/go.mod h1:XYrdZw5dW12Cjkt4ndbeNZZTBp4UCHtW0ccR9+sTtPU= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2 h1:BpGDC87A2SaxbKgONsFLEX3kRcRJee2aLQbjXsuz0hA= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2/go.mod h1:Rd8YnCqz+2FYsiGmE2DMlaLjQRB4v2jFNnzCt9YY4IM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= 
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -323,6 +326,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -355,8 +360,6 @@ golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -432,15 +435,15 @@ golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -509,8 +512,8 @@ golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -530,8 +533,8 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -548,14 +551,14 @@ golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time 
v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -597,8 +600,8 @@ golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58 golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -644,8 +647,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -682,8 +685,8 @@ k8s.io/apiextensions-apiserver v0.30.3 h1:oChu5li2vsZHx2IvnGP3ah8Nj3KyqG3kRSaKmi k8s.io/apiextensions-apiserver v0.30.3/go.mod h1:uhXxYDkMAvl6CJw4lrDN4CPbONkF3+XL9cacCT44kV4= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= -k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= @@ -706,14 +709,14 @@ k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= 
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= -k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -kubevirt.io/api v1.3.0 h1:9sGElMmnRU50pGED+MPPD2OwQl4S5lvjCUjm+t0mI90= -kubevirt.io/api v1.3.0/go.mod h1:e6LkElYZZm8NcP2gKlFVHZS9pgNhIARHIjSBSfeiP1s= -kubevirt.io/client-go v1.3.0 h1:/HKn4exzwsctEVTwVtEFaeT9D2v4TgWr2SmxITVEZ/4= -kubevirt.io/client-go v1.3.0/go.mod h1:qmcJZvUjbmggY1pp7irO3zesBJj7wwGIWAdnYEoh3yc= -kubevirt.io/containerized-data-importer-api v1.59.0 h1:GdDt9BlR0qHejpMaPfASbsG8JWDmBf1s7xZBj5W9qn0= -kubevirt.io/containerized-data-importer-api v1.59.0/go.mod h1:4yOGtCE7HvgKp7wftZZ3TBvDJ0x9d6N6KaRjRYcUFpE= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +kubevirt.io/api v1.3.1 h1:MoTNo/zvDlZ44c2ocXLPln8XTaQOeUodiYbEKrTCqv4= +kubevirt.io/api v1.3.1/go.mod h1:tCn7VAZktEvymk490iPSMPCmKM9UjbbfH2OsFR/IOLU= +kubevirt.io/client-go v1.3.1 h1:KVYwuGPV10W282GbpTha6KaW0fXwbx+PiW4Eyb7Ibnc= +kubevirt.io/client-go v1.3.1/go.mod h1:vtmC6jEe5NHI3DUXRKpD3m2CjIA/yDQSAujxCE7lHSk= +kubevirt.io/containerized-data-importer-api v1.60.3 h1:kQEXi7scpzUa0RPf3/3MKk1Kmem0ZlqqiuK3kDF5L2I= +kubevirt.io/containerized-data-importer-api v1.60.3/go.mod h1:8mwrkZIdy8j/LmCyKt2wFXbiMavLUIqDaegaIF67CZs= kubevirt.io/controller-lifecycle-operator-sdk/api v0.2.4 h1:fZYvD3/Vnitfkx6IJxjLAk8ugnZQ7CXVYcRfkSKmuZY= kubevirt.io/controller-lifecycle-operator-sdk/api v0.2.4/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992..f4e7dbf3 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... 
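> Reviewer note: among the bumps above, github.com/golang-jwt/jwt/v4 moves from v4.5.0 to v4.5.1, a patch release that upstream shipped together with an advisory about error handling around token parsing. A minimal sketch of the defensive pattern this implies for callers; the `validateToken` helper, key, and claims type are illustrative placeholders (not from this repo), using the canonical `github.com/golang-jwt/jwt/v4` import path:

```go
package main

import (
	"errors"
	"fmt"

	jwt "github.com/golang-jwt/jwt/v4"
)

// validateToken parses and verifies an HS256 token. The important part is
// checking err *before* consulting token.Valid: treating a non-nil token as
// trustworthy while ignoring the parse error is the misuse pattern the
// v4.5.x releases hardened against.
func validateToken(tokenStr string, key []byte) (*jwt.RegisteredClaims, error) {
	claims := &jwt.RegisteredClaims{}
	token, err := jwt.ParseWithClaims(tokenStr, claims, func(t *jwt.Token) (interface{}, error) {
		// Reject unexpected signing algorithms rather than trusting the header.
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return key, nil
	})
	if err != nil {
		return nil, err
	}
	if !token.Valid {
		return nil, errors.New("invalid token")
	}
	return claims, nil
}
```

The key point is returning on a non-nil parse error before looking at token.Valid; the token value can be non-nil even when parsing failed.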
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad89585..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001b..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076..daea9dd6 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e57575..fa854785 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ----------- -Nothing yet. +1.8.0 2023-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759..e4ac2a2f 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. 
Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster. +Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). 
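> Reviewer note: for orientation, the `Output:` lines in the script format above (`create /file`, `write /file`) correspond one-to-one to fsnotify events. A rough, hypothetical rendering of an event into that shape, assuming the documented format; this is not the actual test-harness code:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/fsnotify/fsnotify"
)

// formatEvent renders an fsnotify.Event roughly the way the test scripts
// write expected output: a lower-cased op name followed by the path. Events
// carrying multiple ops would come out as e.g. "create|write /file", since
// Op.String() joins flags with "|".
func formatEvent(e fsnotify.Event) string {
	return fmt.Sprintf("%s %s", strings.ToLower(e.Op.String()), e.Name)
}

func main() {
	fmt.Println(formatEvent(fsnotify.Event{Name: "/file", Op: fsnotify.Create}))
	// prints: create /file
}
```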
+ [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1d..c349c326 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. 
If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. 
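> Reviewer note: the NewBufferedWatcher doc comment deleted above appears to be consolidated into fsnotify's shared frontend docs in 1.8 rather than dropped, and it is the upstream guidance on buffered watchers: prefer unbuffered, and raise kernel buffers before adding a userspace buffer. A minimal sketch of the choice it describes, with an arbitrary example buffer size:

```go
package example

import "github.com/fsnotify/fsnotify"

// newWatcher picks between the two constructors. Unbuffered is the right
// default; a userspace buffer is a last resort for event bursts that
// overflow kernel-side queues (e.g. the fs.inotify.max_queued_events
// sysctl) when those can't be raised. 4096 is an arbitrary example size,
// not a recommendation.
func newWatcher(buffered bool) (*fsnotify.Watcher, error) {
	if buffered {
		return fsnotify.NewBufferedWatcher(4096)
	}
	return fsnotify.NewWatcher()
}
```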
-func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. 
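> Reviewer note: the Add() doc comment deleted here carries the key usage advice: watch the parent directory and filter on Event.Name, because editors that save atomically (write a temp file, rename it over the target) orphan a watch placed on the file itself. A minimal sketch of that pattern; the `/etc/app` directory and `config.yaml` name are placeholders:

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch the directory, not the file: an atomic save would otherwise
	// silently drop a watch on the file itself.
	if err := w.Add("/etc/app"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			// Filter by name; events for other files in the directory arrive too.
			if filepath.Base(ev.Name) == "config.yaml" && ev.Op.Has(fsnotify.Write) {
				log.Printf("config changed: %s", ev)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```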
-func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. 
the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
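> Reviewer note: the Remove()/WatchList() docs removed above make two points worth an illustration: removal is non-recursive (if both /tmp/dir and /tmp/dir/subdir were added, both must be removed), and WatchList() reports every path still watched. A hedged sketch combining the two; Close() already tears down all watches, so this is purely illustrative:

```go
package example

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// removeAll drops every active watch one by one. Because Remove is
// non-recursive, explicitly added subdirectories appear in WatchList()
// alongside their parents and are removed here individually.
func removeAll(w *fsnotify.Watcher) {
	for _, p := range w.WatchList() {
		if err := w.Remove(p); err != nil {
			log.Printf("remove %s: %v", p, err)
		}
	}
}
```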
-func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e..36c31169 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. 
-// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. 
+ // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. + if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. 
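The cookies array and wrapping cookieIndex added above form the small no-allocation cache the comment describes: writes advance a wrapping index, lookups scan all ten slots. As a standalone sketch (illustrative names; the real fields are guarded by cookiesMu, and locking is omitted here):

type cookieCache struct {
	slots [10]struct {
		cookie uint32
		path   string
	}
	next uint8 // wraps back to 0 after the last slot
}

func (c *cookieCache) put(cookie uint32, path string) {
	c.slots[c.next].cookie = cookie
	c.slots[c.next].path = path
	c.next = (c.next + 1) % uint8(len(c.slots))
}

func (c *cookieCache) get(cookie uint32) string {
	for _, s := range c.slots {
		if s.cookie == cookie {
			return s.path
		}
	}
	return "" // evicted or never seen; treated as "no rename source"
}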
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
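Stepping back to the constructor change earlier in this hunk: newBackend and newBufferedBackend now receive the Events and Errors channels instead of allocating them. The call site is not part of this diff, but presumably the portable layer in fsnotify.go owns the channels and injects them, roughly like this (hypothetical wiring, for orientation only):

// Assumed shape of the call site elsewhere in the package.
func newWatcherSketch(sz uint) (backend, error) {
	ev := make(chan Event, sz) // sz > 0 only for the buffered variant
	errs := make(chan error)
	return newBufferedBackend(sz, ev, errs)
}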
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. 
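The recursion support in AddWith above hinges on a trailing "/..." marker split off by recursivePath, which is defined elsewhere in the package. Assuming it follows the Go toolchain's "./..." convention, a sketch of its likely shape:

// Assumed behavior, for illustration: "dir/..." means dir, recursively.
func recursivePath(path string) (string, bool) {
	if filepath.Base(path) == "..." {
		return filepath.Dir(path), true
	}
	return path, false
}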
-func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. - return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. 
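The IN_Q_OVERFLOW branch above is where dropped kernel events become visible; per the Errors documentation removed earlier in this file, they surface as ErrEventOverflow on the Errors channel. Consumer-side handling looks roughly like this (application code, not part of the package):

// Drain the error channel; ErrEventOverflow is part of fsnotify's public API.
func drainErrors(w *fsnotify.Watcher) {
	for err := range w.Errors {
		if errors.Is(err, fsnotify.ErrEventOverflow) {
			// The kernel queue overflowed and events were lost; a full
			// rescan of the watched tree is usually the only recovery.
			continue
		}
		log.Println("watch error:", err)
	}
}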
watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path inbetween reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here. - if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. 
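The block that follows is the simple approach that TODO accepts for now: every child watch whose stored path begins with the directory's old path gets a one-shot prefix replacement. The string-level operation in isolation, with illustrative data:

// Rewrite child paths after a directory rename from oldPath to newPath.
func rewriteChildren(children []string, oldPath, newPath string) {
	for i, p := range children {
		if strings.HasPrefix(p, oldPath) {
			children[i] = strings.Replace(p, oldPath, newPath, 1) // first occurrence only
		}
	}
}

// rewriteChildren([]string{"/watched/a/x", "/watched/a/y/z", "/watched/other"},
//	"/watched/a", "/watched/b")
// leaves children as ["/watched/b/x", "/watched/b/y/z", "/watched/other"].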
+ if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. + ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915..d8de5ab7 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). 
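Back in the inotify newEvent above: MOVED_FROM stores (cookie, old path) in the ring, and a later MOVED_TO with the same cookie reads it back into the unexported renamedFrom field, which readEvents uses to fix up child watches. At the public API surface the documented shape is unchanged: the old path arrives as a Rename, the new one as a Create. A consumer sketch:

// Observe both halves of a rename inside a watched directory.
func logRenames(w *fsnotify.Watcher) {
	for ev := range w.Events {
		switch {
		case ev.Has(fsnotify.Rename):
			fmt.Println("moved away:", ev.Name) // the old name
		case ev.Has(fsnotify.Create):
			fmt.Println("appeared:", ev.Name) // may be the rename target
		}
	}
}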
-// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. 
A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). 
An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. +func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. 
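The kqueue watches store above keeps a byDir index, dirname(path) mapped to the set of descriptors watched inside it, so that removing a directory can find the per-file watches kqueue required for its contents. Reduced to its essentials (types simplified, locking omitted):

type dirIndex map[string]map[int]struct{}

func (d dirIndex) add(path string, fd int) {
	dir := filepath.Dir(path)
	if d[dir] == nil {
		d[dir] = map[int]struct{}{}
	}
	d[dir][fd] = struct{}{}
}

func (d dirIndex) remove(path string, fd int) {
	dir := filepath.Dir(path)
	delete(d[dir], fd)
	if len(d[dir]) == 0 {
		delete(d, dir) // drop empty buckets, as watches.remove does
	}
}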
-func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) 
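A note on the reworked Close above: w.done is closed first (under doneMu), user watches are drained, and only then is the write end of closepipe closed. The pipe is a self-pipe wake-up: newKqueue registers its read end with the queue, so closing the write end makes kevent() deliver one final event and lets the reader goroutine exit. Reduced to the two moving parts:

// At construction (see newKqueue): create the pipe; the read end, p[0],
// gets registered with the kqueue descriptor.
func newClosePipe() ([2]int, error) {
	var p [2]int
	err := unix.Pipe(p[:])
	return p, err
}

// At shutdown (see Close): unix.Close(closepipe[1]) wakes the reader, which
// sees an event for closepipe[0] and returns.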
+func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). 
// // Returns the real path to the file which was added, with symlinks resolved. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. 
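The loop that follows is the usual EINTR dance around open(2); factored into a generic helper it would read like this (illustrative, not part of the package):

// Retry unix.Open until it stops failing with EINTR.
func openRetryEINTR(name string, mode int) (int, error) {
	for {
		fd, err := unix.Open(name, mode, 0)
		if err != unix.EINTR {
			return fd, err // includes the success case, err == nil
		}
	}
}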
for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. 
For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". 
+ e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. +func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. 
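sendCreateIfNew above consults the seen set so that re-scanning a directory after a Write does not re-announce files that were already tracked. The dedup in isolation (reduced; the real code marks the path seen only after the watch is set up):

type seenSet map[string]struct{}

func (s seenSet) createIfNew(path string) bool {
	if _, ok := s[path]; ok {
		return false // already tracked, no synthetic Create
	}
	s[path] = struct{}{}
	return true // caller emits Event{Name: path, Op: Create}
}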
+ path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c0..5eb5dbc6 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
+type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. 
For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d..c54a6308 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. 
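kqueue's one-descriptor-per-watched-file cost is also why this update vendors internal SetRlimit helpers (see internal/darwin.go, internal/freebsd.go, and internal/unix.go below) that raise the soft RLIMIT_NOFILE to the hard limit. A standalone sketch of the same strategy, assuming a non-Windows target and simplified error handling:

```go
//go:build !windows

package fsutil

import "syscall"

// raiseOpenFileLimit lifts the soft open-file limit to the hard limit,
// mirroring the strategy of the SetRlimit helpers introduced in this
// update. It returns the resulting soft limit.
func raiseOpenFileLimit() (uint64, error) {
	var l syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l); err != nil {
		return 0, err
	}
	if l.Cur != l.Max {
		l.Cur = l.Max
		if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l); err != nil {
			return 0, err
		}
	}
	return uint64(l.Cur), nil
}
```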
-// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). 
An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. 
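The buffered backend above and the WithBufferSize option discussed in the surrounding comments are the two knobs for bursty event loads: NewBufferedWatcher buffers the Events channel in user space, while WithBufferSize grows the kernel-side ReadDirectoryChangesW buffer on Windows only. A hedged sketch combining both; the sizes are illustrative and the helper name newBurstyWatcher is ours, not the library's:

```go
package fsutil

import "github.com/fsnotify/fsnotify"

// newBurstyWatcher returns a watcher tuned for bursty workloads: a
// 4096-event userspace buffer (an unbuffered watcher is usually the better
// choice) and a 1 MiB kernel buffer on Windows. WithBufferSize is a no-op
// on other platforms, so it is safe to pass unconditionally.
func newBurstyWatcher(dir string) (*fsnotify.Watcher, error) {
	w, err := fsnotify.NewBufferedWatcher(4096)
	if err != nil {
		return nil, err
	}
	if err := w.AddWith(dir, fsnotify.WithBufferSize(1<<20)); err != nil {
		w.Close()
		return nil, err // e.g. buffer sizes below 4096 bytes are rejected
	}
	return w, nil
}
```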
When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := 
windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. 
-func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc4..0760efe9 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment 
variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. 
+ // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. + Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. 
+ xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. 
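Note the new renamedFrom field on Event documented above: it is unexported in this release, so callers needing rename pairing must still correlate the documented Rename-then-Create sequence themselves. A sketch of that correlation; it is a heuristic that assumes both paths are watched and can mispair under heavy churn:

```go
package fsutil

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// logRenames pairs each Rename (old path) with the Create (new path) that,
// per the documentation above, follows it. Sketch only, not a guarantee.
func logRenames(events <-chan fsnotify.Event) {
	var pendingOld string
	for ev := range events {
		switch {
		case ev.Has(fsnotify.Rename):
			pendingOld = ev.Name // old path; a Create with the new path may follow
		case ev.Has(fsnotify.Create) && pendingOld != "":
			log.Printf("renamed %q -> %q", pendingOld, ev.Name)
			pendingOld = ""
		}
	}
}
```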
+// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. 
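The advice above, watching the parent directory and filtering on Event.Name, is the robust way to follow a single file; cmd/fsnotify/file.go carries the full example. A condensed sketch, where watchOneFile is our name and not part of the library:

```go
package fsutil

import (
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// watchOneFile watches the directory containing file and forwards only the
// events for that path, surviving editors' atomic save-and-rename cycle.
// It assumes it is the sole consumer of w.Events.
func watchOneFile(w *fsnotify.Watcher, file string, out chan<- fsnotify.Event) error {
	file = filepath.Clean(file)
	if err := w.Add(filepath.Dir(file)); err != nil {
		return err
	}
	go func() {
		for ev := range w.Events {
			if filepath.Clean(ev.Name) == file {
				out <- ev
			}
		}
	}()
	return nil
}
```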
+// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." { return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 00000000..b0eab100 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 00000000..928319fb --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", 
unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 00000000..3186b0c3 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 00000000..f69fdb93 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + 
{"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 00000000..607e683b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 00000000..35c734be --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go 
new file mode 100644 index 00000000..e5b3b6f6 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 00000000..1dd455bc --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 00000000..f1b2e73b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 00000000..52bf4ce5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 
+1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 00000000..547df1df --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 00000000..7daa45e1 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. 
+package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 00000000..30976ce9 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 00000000..37dfeddc --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 00000000..a72c6495 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. +var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae6..00000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. 
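Returning to the HasPrivilegesForSymlink helper added above: on Windows, creating symlinks normally requires elevation or Developer Mode, which is why it checks the Administrators SID and token elevation. Since it lives in an internal package, external test code would probe directly instead; a sketch, where requireSymlinks is a hypothetical helper:

```go
package fsutil

import (
	"os"
	"path/filepath"
	"testing"
)

// requireSymlinks probes symlink support by creating one in a temp dir and
// skips the test when the OS (e.g. non-elevated Windows) refuses.
func requireSymlinks(t *testing.T) {
	t.Helper()
	dir := t.TempDir()
	target := filepath.Join(dir, "target")
	if err := os.WriteFile(target, nil, 0o644); err != nil {
		t.Fatal(err)
	}
	if err := os.Symlink(target, filepath.Join(dir, "link")); err != nil {
		t.Skipf("symlinks not available: %v", err)
	}
}
```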
- -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b8..f65e8fe3 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa7..a29fc7aa 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/fxamacker/cbor/v2/.gitignore b/vendor/github.com/fxamacker/cbor/v2/.gitignore new file mode 100644 index 00000000..f1c181ec --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml new file mode 100644 index 00000000..38cb9ae1 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml @@ -0,0 +1,104 @@ +# Do not delete linter settings. Linters like gocritic can be enabled on the command line. 
+ +linters-settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: strict + files: + - $all + - "!$test" + allow: + - $gostd + - github.com/x448/float16 + deny: + - pkg: io/ioutil + desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + ignore-tests: true + min-len: 2 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - commentedOutCode + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - paramTypeCombine + - whyNoLint + gofmt: + simplify: false + goimports: + local-prefixes: github.com/fxamacker/cbor + golint: + min-confidence: 0 + govet: + check-shadowing: true + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + staticcheck: + checks: ["all"] + +linters: + disable-all: true + enable: + - asciicheck + - bidichk + - depguard + - errcheck + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nilerr + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unused + +issues: + # max-issues-per-linter default is 50. Set to 0 to disable limit. + max-issues-per-linter: 0 + # max-same-issues default is 3. Set to 0 to disable limit. + max-same-issues: 0 + + exclude-rules: + - path: decode.go + text: "string ` overflows ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `, ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `\\]\\)` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string ` for type ` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string `cbor: ` has (\\d+) occurrences, make it a constant" diff --git a/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..c794b2b0 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +faye.github@gmail.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md new file mode 100644 index 00000000..de0965e1 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md @@ -0,0 +1,41 @@ +# How to contribute + +You can contribute by using the library, opening issues, or opening pull requests. + +## Bug reports and security vulnerabilities + +Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues). + +To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy). + +Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me. + +## Pull requests + +Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc. + +Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts. + +See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details. + +Pull requests have a greater chance of being approved if: +- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature. +- it has > 97% code coverage. + +## Describe your issue + +Clearly describe the issue: +* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error. 
+* If you propose a change or addition, try to give an example how the improved code could look like or how to use it. +* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message. + +## Please don't + +Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me. + +Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me. + +## Credits + +- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22. +- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements. diff --git a/vendor/github.com/fxamacker/cbor/v2/LICENSE b/vendor/github.com/fxamacker/cbor/v2/LICENSE new file mode 100644 index 00000000..eaa85049 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019-present Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md new file mode 100644 index 00000000..af0a7950 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/README.md @@ -0,0 +1,691 @@ +# CBOR Codec in Go + + + +[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). + +CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. + +`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). 
+
+See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accept a user-specified buffer.
+
+## fxamacker/cbor
+
+[![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
+[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
+[![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
+[![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage)
+[![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor)
+
+`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
+
+Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
+
+Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
+
Highlights

+
+__🚀  Speed__
+
+Encoding and decoding are fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory-efficient rejection of malformed CBOR data.
+
+__🔒  Security__
+
+Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+
+Codec passed multiple confidential security assessments in 2022. No vulnerabilities were found in a subset of the codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation.
+
+__🗜️  Data Size__
+
+Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce the size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
+
+__:jigsaw:  Usability__
+
+API is mostly the same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines.
+
+Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc.
+
+__📆  Extensibility__
+
+Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library.
+


+ +
+ +### Secure Decoding with Configurable Settings + +`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. + +By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +
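+
+As a minimal sketch of what "configurable" means here (the option names match the decoding options referenced later in this README; the limit values and the `untrustedData` variable are illustrative):
+
+```go
+// Create a decoding mode with tightened limits for untrusted inputs.
+// The values below are arbitrary examples, not recommendations.
+opts := cbor.DecOptions{
+	MaxArrayElements: 1024, // reject arrays with more than 1024 elements
+	MaxMapPairs:      1024, // reject maps with more than 1024 key-value pairs
+	MaxNestedLevels:  16,   // reject deeply nested data
+}
+dm, err := opts.DecMode() // create an immutable decoding mode
+if err != nil {
+	panic(err)
+}
+
+var v interface{}
+err = dm.Unmarshal(untrustedData, &v) // returns an error instead of exhausting memory
+```
+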
Example decoding with encoding/gob 💥 fatal error (out of memory)

+ +```Go +// Example of encoding/gob having "fatal error: runtime: out of memory" +// while decoding 181 bytes. +package main +import ( + "bytes" + "encoding/gob" + "encoding/hex" + "fmt" +) + +// Example data is from https://github.com/golang/go/issues/24446 +// (shortened to 181 bytes). +const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + + "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + + "860001013001ff860001013001ffb80000001eff850401010e3030303030" + + "30303030303030303001ff3000010c0104000016ffb70201010830303030" + + "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + + "303030303030303030303030303030303030303030303030303030303030" + + "30" + +type X struct { + J *X + K map[string]int +} + +func main() { + raw, _ := hex.DecodeString(data) + decoder := gob.NewDecoder(bytes.NewReader(raw)) + + var x X + decoder.Decode(&x) // fatal error: runtime: out of memory + fmt.Println("Decoding finished.") +} +``` + +
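+
+For contrast, here is an editorial sketch (not part of the upstream example) of this library handling hostile input: decoding the 10-byte malicious CBOR data used in the benchmarks below returns an error quickly instead of crashing.
+
+```go
+// fxamacker/cbor rejects malicious input with an error under default limits.
+package main
+
+import (
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+func main() {
+	// 10-byte malicious CBOR data claiming a huge array
+	// (same input as the benchmarks below).
+	data := []byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}
+
+	var v []byte
+	err := cbor.Unmarshal(data, &v)
+	fmt.Println(err) // non-nil error: decoding stops fast, no crash
+}
+```
+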


+ +
+ +`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to +decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | + +
Benchmark details

+ +Latest comparison used: +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) +- go test -bench=. -benchmem -count=20 + +#### Prior comparisons + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | + +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.6, linux/amd64, i5-13600K (DDR4) +- go test -bench=. -benchmem -count=20 + +
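+
+A sketch of the kind of harness behind these numbers (an assumed shape using Go's standard `testing` package; the project's actual benchmark code may differ):
+
+```go
+func BenchmarkUnmarshalMalicious(b *testing.B) {
+	// Same 10-byte malicious input listed above.
+	data := []byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		var v []byte
+		if err := cbor.Unmarshal(data, &v); err == nil {
+			b.Fatal("expected error for malicious input")
+		}
+	}
+}
+```
+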


+ +
+ +### Smaller Encodings with Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
Example encoding 3-level nested Go struct to 1 byte CBOR

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+ +
+ +Example using different struct tags together: + +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. + +## Quick Start + +__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. + +### Key Points + +This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). + +- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items. +- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items. + +Configurable limits and options can be used to balance trade-offs. + +- Encoding and decoding modes are created from options (settings). +- Modes can be created at startup and reused. +- Modes are safe for concurrent use. + +### Default Mode + +Package level functions only use this library's default settings. +They provide the "default mode" of encoding and decoding. + +```go +// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc. +b, err = cbor.Marshal(v) // encode v to []byte b +err = cbor.Unmarshal(b, &v) // decode []byte b to v +decoder = cbor.NewDecoder(r) // create decoder with io.Reader r +err = decoder.Decode(&v) // decode a CBOR data item to v + +// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface. +err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool. + +// v2.5.0 added new functions that return remaining bytes. + +// UnmarshalFirst decodes first CBOR data item and returns remaining bytes. +rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v + +// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. +text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text + +// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, +// but new funcs UnmarshalFirst and DiagnoseFirst do not. +``` + +__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. + +- Different CBOR libraries may use different default settings. +- CBOR-based formats or protocols usually require specific settings. + +For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. + +### Presets + +Presets can be used as-is or as a starting point for custom settings. + +```go +// EncOptions is a struct of encoder settings. +func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding +func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization +func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR +func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR +``` + +Presets are used to create custom modes. + +### Custom Modes + +Modes are created from settings. Once created, modes have immutable settings. + +💡 Create the mode at startup and reuse it. It is safe for concurrent use. + +```Go +// Create encoding mode. +opts := cbor.CoreDetEncOptions() // use preset options as a starting point +opts.Time = cbor.TimeUnix // change any settings if needed +em, err := opts.EncMode() // create an immutable encoding mode + +// Reuse the encoding mode. It is safe for concurrent use. + +// API matches encoding/json. 
+b, err := em.Marshal(v) // encode v to []byte b +encoder := em.NewEncoder(w) // create encoder with io.Writer w +err := encoder.Encode(v) // encode v to io.Writer w +``` + +Default mode and custom modes automatically apply struct tags. + +### User Specified Buffer for Encoding (v2.7.0) + +`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool. + +```Go +em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode + +var buf bytes.Buffer +err = em.MarshalToBuffer(v, &buf) // encode v to provided buf +``` + +### Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
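+
+A short sketch of how these struct tags are declared (type and field names here are illustrative; the `_ struct{}` field for `toarray` is the form documented by this library):
+
+```go
+// "toarray" encodes the struct as a fixed-length CBOR array (no field names).
+type Coordinates struct {
+	_   struct{} `cbor:",toarray"`
+	Lat float64
+	Lng float64
+}
+
+// "keyasint" encodes the map key as a CBOR integer instead of a text string.
+type Claims struct {
+	Issuer  string `cbor:"1,keyasint"`
+	Expires int64  `cbor:"2,keyasint,omitempty"`
+}
+```
+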
Example encoding 3-level nested Go struct to 1 byte CBOR

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+ +
+ +
Example using several struct tags

+ +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +

+ +Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. + +### CBOR Tags + +CBOR tags are specified in a `TagSet`. + +Custom modes can be created with a `TagSet` to handle CBOR tags. + +```go +em, err := opts.EncMode() // no CBOR tags +em, err := opts.EncModeWithTags(ts) // immutable CBOR tags +em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags +``` + +`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. + +
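+
+The `DecMode` counterparts follow the same pattern (a sketch mirroring the `EncMode` lines above):
+
+```go
+dm, err := opts.DecMode()                 // no CBOR tags
+dm, err := opts.DecModeWithTags(ts)       // immutable CBOR tags
+dm, err := opts.DecModeWithSharedTags(ts) // mutable shared CBOR tags
+```
+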
Example using TagSet and TagOptions

+
+```go
+// Use signedCWT struct defined in "Decoding CWT" example.
+
+// Create TagSet (safe for concurrency).
+tags := cbor.NewTagSet()
+// Register tag COSE_Sign1 18 with signedCWT type.
+tags.Add(
+	cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
+	reflect.TypeOf(signedCWT{}),
+	18)
+
+// Create DecMode with immutable tags.
+dm, _ := cbor.DecOptions{}.DecModeWithTags(tags)
+
+// Unmarshal to signedCWT with tag support.
+var v signedCWT
+if err := dm.Unmarshal(data, &v); err != nil {
+	return err
+}
+
+// Create EncMode with immutable tags.
+em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
+
+// Marshal signedCWT with tag number.
+data, err := em.Marshal(v)
+if err != nil {
+	return err
+}
+```

+ +### Functions and Interfaces + +
Functions and interfaces at a glance

+
+Common functions with same API as `encoding/json`:
+- `Marshal`, `Unmarshal`
+- `NewEncoder`, `(*Encoder).Encode`
+- `NewDecoder`, `(*Decoder).Decode`
+
+NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes
+because RFC 8949 treats a CBOR data item with remaining bytes as malformed.
+- 💡 Use `UnmarshalFirst` to decode the first CBOR data item and return any remaining bytes.
+
+Other useful functions:
+- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
+- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes.
+- `Wellformed` returns nil if the CBOR data item is well-formed.
+
+Interfaces identical or comparable to Go `encoding` packages include:
+`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
+
+The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding.
+
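+
+A sketch of delayed decoding with `RawMessage` (type and field names are illustrative, and `data` is assumed given):
+
+```go
+type Envelope struct {
+	Kind    string          `cbor:"kind"`
+	Payload cbor.RawMessage `cbor:"payload"` // kept as raw CBOR bytes
+}
+
+var env Envelope
+if err := cbor.Unmarshal(data, &env); err != nil {
+	return err
+}
+// Decode env.Payload later, once env.Kind identifies the concrete type.
+```
+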

+ +### Security Tips + +🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data. + +Default limits may need to be increased for systems handling very large data (e.g. blockchains). + +`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`. + +## Status + +v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. + +For more details, see [release notes](https://github.com/fxamacker/cbor/releases). + +### Prior Release + +[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. + +v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. + +See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes. + +See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. + + + +## Who uses fxamacker/cbor + +`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others. + +`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope. + +## Standards + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Notable CBOR features include: + +| CBOR Feature | Description | +| :--- | :--- | +| CBOR tags | API supports built-in and user-defined tags. | +| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. | +| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). 
| +| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. | +| Indefinite length data | Option to allow/forbid for encoding and decoding. | +| Well-formedness | Always checked and enforced. | +| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. | +| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). | + +Known limitations are noted in the [Limitations section](#limitations). + +Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps. + +Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data. + +After well-formedness is verified, basic validity errors are handled as follows: + +* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default. +* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys. + +When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future. + +By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined. + +__Click to expand topic:__ + +
+ Duplicate Map Keys

+
+This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
+
+`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type.
+
+`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number.
+
+The APF suffix means "Allow Partial Fill", so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol.
+
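+
+A sketch of selecting one of these modes (option and constant names as described above; `data` is assumed given):
+
+```go
+// Reject duplicate map keys; the destination may be partially filled (APF).
+dm, _ := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
+
+var m map[string]int
+if err := dm.Unmarshal(data, &m); err != nil {
+	// err is *cbor.DupMapKeyError when the first duplicate key is detected;
+	// discard m here if the protocol requires rejecting the whole message.
+}
+```
+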

+ +
+ Tag Validity

+ +This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799): + +* Inadmissible type for tag content +* Inadmissible value for tag content + +Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways: + +* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type. +* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified. + +Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR. + +For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options). + +

+ +## Limitations + +If any of these limitations prevent you from using this library, please open an issue along with a link to your project. + +* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`. +* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items. +* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation. + +## Fuzzing and Code Coverage + +__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release. + +__Coverage-guided fuzzing__ must pass billions of execs using before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project. + +
+ +## Versions and API Changes +This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes. + +These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases: +`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`. + +Exclusions from SemVer: +- Newly added API documented as "subject to change". +- Newly added API in the master branch that has never been tagged in non-beta release. +- If function parameters are unchanged, bug fixes that change behavior (e.g. return error for edge case was missed in prior version). We try to highlight these in the release notes and add extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.) Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions. + +## Code of Conduct + +This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments. + +## Contributing + +Please open an issue before beginning work on a PR. The improvement may have already been considered, etc. + +For more info, see [How to Contribute](CONTRIBUTING.md). + +## Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +For the full text of the Security Policy, see [SECURITY.md](SECURITY.md). + +## Acknowledgements + +Many thanks to all the contributors on this project! + +I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more. + +I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days. + +Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0. + +This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs. + +Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis). + +Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included! + +This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well. + +## License + +Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker). + +fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text. + +
diff --git a/vendor/github.com/fxamacker/cbor/v2/SECURITY.md b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md new file mode 100644 index 00000000..9c05146d --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +If the security vulnerability is already known to the public, then you can open an issue as a bug report. + +To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public. diff --git a/vendor/github.com/fxamacker/cbor/v2/bytestring.go b/vendor/github.com/fxamacker/cbor/v2/bytestring.go new file mode 100644 index 00000000..823bff12 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/bytestring.go @@ -0,0 +1,63 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "errors" +) + +// ByteString represents CBOR byte string (major type 2). ByteString can be used +// when using a Go []byte is not possible or convenient. For example, Go doesn't +// allow []byte as map key, so ByteString can be used to support data formats +// having CBOR map with byte string keys. ByteString can also be used to +// encode invalid UTF-8 string as CBOR byte string. +// See DecOption.MapKeyByteStringMode for more details. +type ByteString string + +// Bytes returns bytes representing ByteString. +func (bs ByteString) Bytes() []byte { + return []byte(bs) +} + +// MarshalCBOR encodes ByteString as CBOR byte string (major type 2). +func (bs ByteString) MarshalCBOR() ([]byte, error) { + e := getEncodeBuffer() + defer putEncodeBuffer(e) + + // Encode length + encodeHead(e, byte(cborTypeByteString), uint64(len(bs))) + + // Encode data + buf := make([]byte, e.Len()+len(bs)) + n := copy(buf, e.Bytes()) + copy(buf[n:], bs) + + return buf, nil +} + +// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString. +// Decoding CBOR null and CBOR undefined sets ByteString to be empty. +func (bs *ByteString) UnmarshalCBOR(data []byte) error { + if bs == nil { + return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and CBOR undefined to ByteString resets data. + // This behavior is similar to decoding CBOR null and CBOR undefined to []byte. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + *bs = "" + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Check if CBOR data type is byte string + if typ := d.nextCBORType(); typ != cborTypeByteString { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()} + } + + b, _ := d.parseByteString() + *bs = ByteString(b) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go new file mode 100644 index 00000000..ea0f39e2 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/cache.go @@ -0,0 +1,363 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +type encodeFuncs struct { + ef encodeFunc + ief isEmptyFunc +} + +var ( + decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType + encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType + encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs + typeInfoCache sync.Map // map[reflect.Type]*typeInfo +) + +type specialType int + +const ( + specialTypeNone specialType = iota + specialTypeUnmarshalerIface + specialTypeEmptyIface + specialTypeIface + specialTypeTag + specialTypeTime +) + +type typeInfo struct { + elemTypeInfo *typeInfo + keyTypeInfo *typeInfo + typ reflect.Type + kind reflect.Kind + nonPtrType reflect.Type + nonPtrKind reflect.Kind + spclType specialType +} + +func newTypeInfo(t reflect.Type) *typeInfo { + tInfo := typeInfo{typ: t, kind: t.Kind()} + + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + k := t.Kind() + + tInfo.nonPtrType = t + tInfo.nonPtrKind = k + + if k == reflect.Interface { + if t.NumMethod() == 0 { + tInfo.spclType = specialTypeEmptyIface + } else { + tInfo.spclType = specialTypeIface + } + } else if t == typeTag { + tInfo.spclType = specialTypeTag + } else if t == typeTime { + tInfo.spclType = specialTypeTime + } else if reflect.PtrTo(t).Implements(typeUnmarshaler) { + tInfo.spclType = specialTypeUnmarshalerIface + } + + switch k { + case reflect.Array, reflect.Slice: + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + case reflect.Map: + tInfo.keyTypeInfo = getTypeInfo(t.Key()) + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + } + + return &tInfo +} + +type decodingStructType struct { + fields fields + fieldIndicesByName map[string]int + err error + toArray bool +} + +// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead, +// here's a very basic implementation of an aggregated error. 
+type multierror []error + +func (m multierror) Error() string { + var sb strings.Builder + for i, err := range m { + sb.WriteString(err.Error()) + if i < len(m)-1 { + sb.WriteString(", ") + } + } + return sb.String() +} + +func getDecodingStructType(t reflect.Type) *decodingStructType { + if v, _ := decodingStructTypeCache.Load(t); v != nil { + return v.(*decodingStructType) + } + + flds, structOptions := getFields(t) + + toArray := hasToArrayOption(structOptions) + + var errs []error + for i := 0; i < len(flds); i++ { + if flds[i].keyAsInt { + nameAsInt, numErr := strconv.Atoi(flds[i].name) + if numErr != nil { + errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")")) + break + } + flds[i].nameAsInt = int64(nameAsInt) + } + + flds[i].typInfo = getTypeInfo(flds[i].typ) + } + + fieldIndicesByName := make(map[string]int, len(flds)) + for i, fld := range flds { + if _, ok := fieldIndicesByName[fld.name]; ok { + errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name)) + continue + } + fieldIndicesByName[fld.name] = i + } + + var err error + { + var multi multierror + for _, each := range errs { + if each != nil { + multi = append(multi, each) + } + } + if len(multi) == 1 { + err = multi[0] + } else if len(multi) > 1 { + err = multi + } + } + + structType := &decodingStructType{ + fields: flds, + fieldIndicesByName: fieldIndicesByName, + err: err, + toArray: toArray, + } + decodingStructTypeCache.Store(t, structType) + return structType +} + +type encodingStructType struct { + fields fields + bytewiseFields fields + lengthFirstFields fields + omitEmptyFieldsIdx []int + err error + toArray bool +} + +func (st *encodingStructType) getFields(em *encMode) fields { + switch em.sort { + case SortNone, SortFastShuffle: + return st.fields + case SortLengthFirst: + return st.lengthFirstFields + default: + return st.bytewiseFields + } +} + +type bytewiseFieldSorter struct { + fields fields +} + +func (x *bytewiseFieldSorter) Len() int { + return len(x.fields) +} + +func (x *bytewiseFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *bytewiseFieldSorter) Less(i, j int) bool { + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +type lengthFirstFieldSorter struct { + fields fields +} + +func (x *lengthFirstFieldSorter) Len() int { + return len(x.fields) +} + +func (x *lengthFirstFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *lengthFirstFieldSorter) Less(i, j int) bool { + if len(x.fields[i].cborName) != len(x.fields[j].cborName) { + return len(x.fields[i].cborName) < len(x.fields[j].cborName) + } + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { + if v, _ := encodingStructTypeCache.Load(t); v != nil { + structType := v.(*encodingStructType) + return structType, structType.err + } + + flds, structOptions := getFields(t) + + if hasToArrayOption(structOptions) { + return getEncodingStructToArrayType(t, flds) + } + + var err error + var hasKeyAsInt bool + var hasKeyAsStr bool + var omitEmptyIdx []int + e := getEncodeBuffer() + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + err = &UnsupportedTypeError{t} + break + } + + // Encode field name + if flds[i].keyAsInt { + nameAsInt, numErr := 
strconv.Atoi(flds[i].name) + if numErr != nil { + err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")") + break + } + flds[i].nameAsInt = int64(nameAsInt) + if nameAsInt >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) + } else { + n := nameAsInt*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) + } + flds[i].cborName = make([]byte, e.Len()) + copy(flds[i].cborName, e.Bytes()) + e.Reset() + + hasKeyAsInt = true + } else { + encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name))) + flds[i].cborName = make([]byte, e.Len()+len(flds[i].name)) + n := copy(flds[i].cborName, e.Bytes()) + copy(flds[i].cborName[n:], flds[i].name) + e.Reset() + + // If cborName contains a text string, then cborNameByteString contains a + // string that has the byte string major type but is otherwise identical to + // cborName. + flds[i].cborNameByteString = make([]byte, len(flds[i].cborName)) + copy(flds[i].cborNameByteString, flds[i].cborName) + // Reset encoded CBOR type to byte string, preserving the "additional + // information" bits: + flds[i].cborNameByteString[0] = byte(cborTypeByteString) | + getAdditionalInformation(flds[i].cborNameByteString[0]) + + hasKeyAsStr = true + } + + // Check if field can be omitted when empty + if flds[i].omitEmpty { + omitEmptyIdx = append(omitEmptyIdx, i) + } + } + putEncodeBuffer(e) + + if err != nil { + structType := &encodingStructType{err: err} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + + // Sort fields by canonical order + bytewiseFields := make(fields, len(flds)) + copy(bytewiseFields, flds) + sort.Sort(&bytewiseFieldSorter{bytewiseFields}) + + lengthFirstFields := bytewiseFields + if hasKeyAsInt && hasKeyAsStr { + lengthFirstFields = make(fields, len(flds)) + copy(lengthFirstFields, flds) + sort.Sort(&lengthFirstFieldSorter{lengthFirstFields}) + } + + structType := &encodingStructType{ + fields: flds, + bytewiseFields: bytewiseFields, + lengthFirstFields: lengthFirstFields, + omitEmptyFieldsIdx: omitEmptyIdx, + } + + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) { + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + structType := &encodingStructType{err: &UnsupportedTypeError{t}} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + } + + structType := &encodingStructType{ + fields: flds, + toArray: true, + } + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) { + if v, _ := encodeFuncCache.Load(t); v != nil { + fs := v.(encodeFuncs) + return fs.ef, fs.ief + } + ef, ief := getEncodeFuncInternal(t) + encodeFuncCache.Store(t, encodeFuncs{ef, ief}) + return ef, ief +} + +func getTypeInfo(t reflect.Type) *typeInfo { + if v, _ := typeInfoCache.Load(t); v != nil { + return v.(*typeInfo) + } + tInfo := newTypeInfo(t) + typeInfoCache.Store(t, tInfo) + return tInfo +} + +func hasToArrayOption(tag string) bool { + s := ",toarray" + idx := strings.Index(tag, s) + return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',') +} diff --git a/vendor/github.com/fxamacker/cbor/v2/common.go b/vendor/github.com/fxamacker/cbor/v2/common.go new file mode 100644 index 
00000000..ec038a49 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/common.go @@ -0,0 +1,182 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "fmt" + "strconv" +) + +type cborType uint8 + +const ( + cborTypePositiveInt cborType = 0x00 + cborTypeNegativeInt cborType = 0x20 + cborTypeByteString cborType = 0x40 + cborTypeTextString cborType = 0x60 + cborTypeArray cborType = 0x80 + cborTypeMap cborType = 0xa0 + cborTypeTag cborType = 0xc0 + cborTypePrimitives cborType = 0xe0 +) + +func (t cborType) String() string { + switch t { + case cborTypePositiveInt: + return "positive integer" + case cborTypeNegativeInt: + return "negative integer" + case cborTypeByteString: + return "byte string" + case cborTypeTextString: + return "UTF-8 text string" + case cborTypeArray: + return "array" + case cborTypeMap: + return "map" + case cborTypeTag: + return "tag" + case cborTypePrimitives: + return "primitives" + default: + return "Invalid type " + strconv.Itoa(int(t)) + } +} + +type additionalInformation uint8 + +const ( + maxAdditionalInformationWithoutArgument = 23 + additionalInformationWith1ByteArgument = 24 + additionalInformationWith2ByteArgument = 25 + additionalInformationWith4ByteArgument = 26 + additionalInformationWith8ByteArgument = 27 + + // For major type 7. + additionalInformationAsFalse = 20 + additionalInformationAsTrue = 21 + additionalInformationAsNull = 22 + additionalInformationAsUndefined = 23 + additionalInformationAsFloat16 = 25 + additionalInformationAsFloat32 = 26 + additionalInformationAsFloat64 = 27 + + // For major type 2, 3, 4, 5. + additionalInformationAsIndefiniteLengthFlag = 31 +) + +const ( + maxSimpleValueInAdditionalInformation = 23 + minSimpleValueIn1ByteArgument = 32 +) + +func (ai additionalInformation) isIndefiniteLength() bool { + return ai == additionalInformationAsIndefiniteLengthFlag +} + +const ( + // From RFC 8949 Section 3: + // "The initial byte of each encoded data item contains both information about the major type + // (the high-order 3 bits, described in Section 3.1) and additional information + // (the low-order 5 bits)." + + // typeMask is used to extract major type in initial byte of encoded data item. + typeMask = 0xe0 + + // additionalInformationMask is used to extract additional information in initial byte of encoded data item. 
+ additionalInformationMask = 0x1f +) + +func getType(raw byte) cborType { + return cborType(raw & typeMask) +} + +func getAdditionalInformation(raw byte) byte { + return raw & additionalInformationMask +} + +func isBreakFlag(raw byte) bool { + return raw == cborBreakFlag +} + +func parseInitialByte(b byte) (t cborType, ai byte) { + return getType(b), getAdditionalInformation(b) +} + +const ( + tagNumRFC3339Time = 0 + tagNumEpochTime = 1 + tagNumUnsignedBignum = 2 + tagNumNegativeBignum = 3 + tagNumExpectedLaterEncodingBase64URL = 21 + tagNumExpectedLaterEncodingBase64 = 22 + tagNumExpectedLaterEncodingBase16 = 23 + tagNumSelfDescribedCBOR = 55799 +) + +const ( + cborBreakFlag = byte(0xff) + cborByteStringWithIndefiniteLengthHead = byte(0x5f) + cborTextStringWithIndefiniteLengthHead = byte(0x7f) + cborArrayWithIndefiniteLengthHead = byte(0x9f) + cborMapWithIndefiniteLengthHead = byte(0xbf) +) + +var ( + cborFalse = []byte{0xf4} + cborTrue = []byte{0xf5} + cborNil = []byte{0xf6} + cborNaN = []byte{0xf9, 0x7e, 0x00} + cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00} + cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00} +) + +// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types. +func validBuiltinTag(tagNum uint64, contentHead byte) error { + t := getType(contentHead) + switch tagNum { + case tagNumRFC3339Time: + // Tag content (date/time text string in RFC 3339 format) must be string type. + if t != cborTypeTextString { + return newInadmissibleTagContentTypeError( + tagNumRFC3339Time, + "text string", + t.String()) + } + return nil + + case tagNumEpochTime: + // Tag content (epoch date/time) must be uint, int, or float type. + if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) { + return newInadmissibleTagContentTypeError( + tagNumEpochTime, + "integer or floating-point number", + t.String()) + } + return nil + + case tagNumUnsignedBignum, tagNumNegativeBignum: + // Tag content (bignum) must be byte type. + if t != cborTypeByteString { + return newInadmissibleTagContentTypeErrorf( + fmt.Sprintf( + "tag number %d or %d must be followed by byte string, got %s", + tagNumUnsignedBignum, + tagNumNegativeBignum, + t.String(), + )) + } + return nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // From RFC 8949 3.4.5.2: + // The data item tagged can be a byte string or any other data item. In the latter + // case, the tag applies to all of the byte string data items contained in the data + // item, except for those contained in a nested data item tagged with an expected + // conversion. + return nil + } + + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go new file mode 100644 index 00000000..85842ac7 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/decode.go @@ -0,0 +1,3187 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/x448/float16" +) + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using default decoding options. 
If v is nil, not a pointer, or +// a nil pointer, Unmarshal returns an error. +// +// To unmarshal CBOR into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalCBOR method with a valid +// CBOR value. +// +// To unmarshal CBOR byte string into a value implementing the +// encoding.BinaryUnmarshaler interface, Unmarshal calls that value's +// UnmarshalBinary method with decoded CBOR byte string. +// +// To unmarshal CBOR into a pointer, Unmarshal sets the pointer to nil +// if CBOR data is null (0xf6) or undefined (0xf7). Otherwise, Unmarshal +// unmarshals CBOR into the value pointed to by the pointer. If the +// pointer is nil, Unmarshal creates a new value for it to point to. +// +// To unmarshal CBOR into an empty interface value, Unmarshal uses the +// following rules: +// +// CBOR booleans decode to bool. +// CBOR positive integers decode to uint64. +// CBOR negative integers decode to int64 (big.Int if value overflows). +// CBOR floating points decode to float64. +// CBOR byte strings decode to []byte. +// CBOR text strings decode to string. +// CBOR arrays decode to []interface{}. +// CBOR maps decode to map[interface{}]interface{}. +// CBOR null and undefined values decode to nil. +// CBOR times (tag 0 and 1) decode to time.Time. +// CBOR bignums (tag 2 and 3) decode to big.Int. +// CBOR tags with an unrecognized number decode to cbor.Tag +// +// To unmarshal a CBOR array into a slice, Unmarshal allocates a new slice +// if the CBOR array is empty or slice capacity is less than CBOR array length. +// Otherwise Unmarshal overwrites existing elements, and sets slice length +// to CBOR array length. +// +// To unmarshal a CBOR array into a Go array, Unmarshal decodes CBOR array +// elements into Go array elements. If the Go array is smaller than the +// CBOR array, the extra CBOR array elements are discarded. If the CBOR +// array is smaller than the Go array, the extra Go array elements are +// set to zero values. +// +// To unmarshal a CBOR array into a struct, struct must have a special field "_" +// with struct tag `cbor:",toarray"`. Go array elements are decoded into struct +// fields. Any "omitempty" struct field tag option is ignored in this case. +// +// To unmarshal a CBOR map into a map, Unmarshal allocates a new map only if the +// map is nil. Otherwise Unmarshal reuses the existing map and keeps existing +// entries. Unmarshal stores key-value pairs from the CBOR map into Go map. +// See DecOptions.DupMapKey to enable duplicate map key detection. +// +// To unmarshal a CBOR map into a struct, Unmarshal matches CBOR map keys to the +// keys in the following priority: +// +// 1. "cbor" key in struct field tag, +// 2. "json" key in struct field tag, +// 3. struct field name. +// +// Unmarshal tries an exact match for field name, then a case-insensitive match. +// Map key-value pairs without corresponding struct fields are ignored. See +// DecOptions.ExtraReturnErrors to return error at unknown field. +// +// To unmarshal a CBOR text string into a time.Time value, Unmarshal parses text +// string formatted in RFC3339. To unmarshal a CBOR integer/float into a +// time.Time value, Unmarshal creates an unix time with integer/float as seconds +// and fractional seconds since January 1, 1970 UTC. As a special case, Infinite +// and NaN float values decode to time.Time's zero value. +// +// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a +// slice/map/pointer, Unmarshal sets Go value to nil. 
+// Because null is often used to mean "not present", unmarshalling CBOR null
+// and undefined values into any other Go type has no effect and returns no error.
+//
+// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time),
+// and tag 2 and 3 (bignum).
+//
+// Unmarshal returns an ExtraneousDataError (without decoding into v)
+// if there are any remaining bytes following the first valid CBOR data item.
+// See UnmarshalFirst if you want to unmarshal only the first
+// CBOR data item without ExtraneousDataError caused by remaining bytes.
+func Unmarshal(data []byte, v interface{}) error {
+	return defaultDecMode.Unmarshal(data, v)
+}
+
+// UnmarshalFirst parses the first CBOR data item into the value pointed to by v
+// using default decoding options. Any remaining bytes are returned in rest.
+//
+// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
+//
+// See the documentation for Unmarshal for details.
+func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) {
+	return defaultDecMode.UnmarshalFirst(data, v)
+}
+
+// Valid checks whether data is a well-formed encoded CBOR data item and
+// that it complies with default restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+//
+// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity)
+// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed".
+//
+// Deprecated: Valid is kept for compatibility and should not be used.
+// Use Wellformed instead because it has a more appropriate name.
+func Valid(data []byte) error {
+	return defaultDecMode.Valid(data)
+}
+
+// Wellformed checks whether data is a well-formed encoded CBOR data item and
+// that it complies with default restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+func Wellformed(data []byte) error {
+	return defaultDecMode.Wellformed(data)
+}
+
+// Unmarshaler is the interface implemented by types that wish to unmarshal
+// CBOR data themselves. The input is a valid CBOR value. UnmarshalCBOR
+// must copy the CBOR data if it needs to use it after returning.
+type Unmarshaler interface {
+	UnmarshalCBOR([]byte) error
+}
+
+// InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+type InvalidUnmarshalError struct {
+	s string
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+	return e.s
+}
+
+// UnmarshalTypeError describes a CBOR value that can't be decoded to a Go type.
+type UnmarshalTypeError struct {
+	CBORType        string // type of CBOR value
+	GoType          string // type of Go value it could not be decoded into
+	StructFieldName string // name of the struct field holding the Go value (optional)
+	errorMsg        string // additional error message (optional)
+}
+
+func (e *UnmarshalTypeError) Error() string {
+	var s string
+	if e.StructFieldName != "" {
+		s = "cbor: cannot unmarshal " + e.CBORType + " into Go struct field " + e.StructFieldName + " of type " + e.GoType
+	} else {
+		s = "cbor: cannot unmarshal " + e.CBORType + " into Go value of type " + e.GoType
+	}
+	if e.errorMsg != "" {
+		s += " (" + e.errorMsg + ")"
+	}
+	return s
+}
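+
+// A minimal usage sketch (editorial illustration, not part of the upstream
+// file; it assumes only the Unmarshal function above and this vendor tree's
+// module path):
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//
+//		"github.com/fxamacker/cbor/v2"
+//	)
+//
+//	func main() {
+//		raw := []byte{0xa1, 0x61, 0x61, 0x01} // CBOR encoding of {"a": 1}
+//		var decoded interface{}
+//		if err := cbor.Unmarshal(raw, &decoded); err != nil {
+//			panic(err)
+//		}
+//		// Per the rules above, the map decodes to
+//		// map[interface{}]interface{} and the value 1 to uint64.
+//		fmt.Printf("%#v\n", decoded)
+//	}
+
+// InvalidMapKeyTypeError describes invalid Go map key type when decoding CBOR map.
+// For example, Go doesn't allow slice as map key.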
+type InvalidMapKeyTypeError struct { + GoType string +} + +func (e *InvalidMapKeyTypeError) Error() string { + return "cbor: invalid map key type: " + e.GoType +} + +// DupMapKeyError describes detected duplicate map key in CBOR map. +type DupMapKeyError struct { + Key interface{} + Index int +} + +func (e *DupMapKeyError) Error() string { + return fmt.Sprintf("cbor: found duplicate map key \"%v\" at map element index %d", e.Key, e.Index) +} + +// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct. +type UnknownFieldError struct { + Index int +} + +func (e *UnknownFieldError) Error() string { + return fmt.Sprintf("cbor: found unknown field at map element index %d", e.Index) +} + +// UnacceptableDataItemError is returned when unmarshaling a CBOR input that contains a data item +// that is not acceptable to a specific CBOR-based application protocol ("invalid or unexpected" as +// described in RFC 8949 Section 5 Paragraph 3). +type UnacceptableDataItemError struct { + CBORType string + Message string +} + +func (e UnacceptableDataItemError) Error() string { + return fmt.Sprintf("cbor: data item of cbor type %s is not accepted by protocol: %s", e.CBORType, e.Message) +} + +// ByteStringExpectedFormatError is returned when unmarshaling CBOR byte string fails when +// using non-default ByteStringExpectedFormat decoding option that makes decoder expect +// a specified format such as base64, hex, etc. +type ByteStringExpectedFormatError struct { + expectedFormatOption ByteStringExpectedFormatMode + err error +} + +func newByteStringExpectedFormatError(expectedFormatOption ByteStringExpectedFormatMode, err error) *ByteStringExpectedFormatError { + return &ByteStringExpectedFormatError{expectedFormatOption, err} +} + +func (e *ByteStringExpectedFormatError) Error() string { + switch e.expectedFormatOption { + case ByteStringExpectedBase64URL: + return fmt.Sprintf("cbor: failed to decode base64url from byte string: %s", e.err) + + case ByteStringExpectedBase64: + return fmt.Sprintf("cbor: failed to decode base64 from byte string: %s", e.err) + + case ByteStringExpectedBase16: + return fmt.Sprintf("cbor: failed to decode hex from byte string: %s", e.err) + + default: + return fmt.Sprintf("cbor: failed to decode byte string in expected format %d: %s", e.expectedFormatOption, e.err) + } +} + +func (e *ByteStringExpectedFormatError) Unwrap() error { + return e.err +} + +// InadmissibleTagContentTypeError is returned when unmarshaling built-in CBOR tags +// fails because of inadmissible type for tag content. Currently, the built-in +// CBOR tags in this codec are tags 0-3 and 21-23. +// See "Tag validity" in RFC 8949 Section 5.3.2. 
+type InadmissibleTagContentTypeError struct { + s string + tagNum int + expectedTagContentType string + gotTagContentType string +} + +func newInadmissibleTagContentTypeError( + tagNum int, + expectedTagContentType string, + gotTagContentType string, +) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{ + tagNum: tagNum, + expectedTagContentType: expectedTagContentType, + gotTagContentType: gotTagContentType, + } +} + +func newInadmissibleTagContentTypeErrorf(s string) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{s: "cbor: " + s} //nolint:goconst // ignore "cbor" +} + +func (e *InadmissibleTagContentTypeError) Error() string { + if e.s == "" { + return fmt.Sprintf( + "cbor: tag number %d must be followed by %s, got %s", + e.tagNum, + e.expectedTagContentType, + e.gotTagContentType, + ) + } + return e.s +} + +// DupMapKeyMode specifies how to enforce duplicate map key. Two map keys are considered duplicates if: +// 1. When decoding into a struct, both keys match the same struct field. The keys are also +// considered duplicates if neither matches any field and decoding to interface{} would produce +// equal (==) values for both keys. +// 2. When decoding into a map, both keys are equal (==) when decoded into values of the +// destination map's key type. +type DupMapKeyMode int + +const ( + // DupMapKeyQuiet doesn't enforce duplicate map key. Decoder quietly (no error) + // uses faster of "keep first" or "keep last" depending on Go data type and other factors. + DupMapKeyQuiet DupMapKeyMode = iota + + // DupMapKeyEnforcedAPF enforces detection and rejection of duplicate map keys. + // APF means "Allow Partial Fill" and the destination map or struct can be partially filled. + // If a duplicate map key is detected, DupMapKeyError is returned without further decoding + // of the map. It's the caller's responsibility to respond to DupMapKeyError by + // discarding the partially filled result if their protocol requires it. + // WARNING: using DupMapKeyEnforcedAPF will decrease performance and increase memory use. + DupMapKeyEnforcedAPF + + maxDupMapKeyMode +) + +func (dmkm DupMapKeyMode) valid() bool { + return dmkm >= 0 && dmkm < maxDupMapKeyMode +} + +// IndefLengthMode specifies whether to allow indefinite length items. +type IndefLengthMode int + +const ( + // IndefLengthAllowed allows indefinite length items. + IndefLengthAllowed IndefLengthMode = iota + + // IndefLengthForbidden disallows indefinite length items. + IndefLengthForbidden + + maxIndefLengthMode +) + +func (m IndefLengthMode) valid() bool { + return m >= 0 && m < maxIndefLengthMode +} + +// TagsMode specifies whether to allow CBOR tags. +type TagsMode int + +const ( + // TagsAllowed allows CBOR tags. + TagsAllowed TagsMode = iota + + // TagsForbidden disallows CBOR tags. + TagsForbidden + + maxTagsMode +) + +func (tm TagsMode) valid() bool { + return tm >= 0 && tm < maxTagsMode +} + +// IntDecMode specifies which Go type (int64, uint64, or big.Int) should +// be used when decoding CBOR integers (major type 0 and 1) to Go interface{}. +type IntDecMode int + +const ( + // IntDecConvertNone affects how CBOR integers (major type 0 and 1) decode to Go interface{}. 
+	// It decodes CBOR unsigned integer (major type 0) to:
+	//   - uint64
+	// It decodes CBOR negative integer (major type 1) to:
+	//   - int64 if value fits
+	//   - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
+	IntDecConvertNone IntDecMode = iota
+
+	// IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+	// It decodes CBOR integers (major type 0 and 1) to:
+	//   - int64 if value fits
+	//   - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64
+	//   - return UnmarshalTypeError if value > math.MaxInt64
+	// Deprecated: IntDecConvertSigned should not be used.
+	// Please use other options, such as IntDecConvertSignedOrFail, IntDecConvertSignedOrBigInt, IntDecConvertNone.
+	IntDecConvertSigned
+
+	// IntDecConvertSignedOrFail affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+	// It decodes CBOR integers (major type 0 and 1) to:
+	//   - int64 if value fits
+	//   - return UnmarshalTypeError if value doesn't fit into int64
+	IntDecConvertSignedOrFail
+
+	// IntDecConvertSignedOrBigInt affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+	// It makes CBOR integers (major type 0 and 1) decode to:
+	//   - int64 if value fits
+	//   - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
+	IntDecConvertSignedOrBigInt
+
+	maxIntDec
+)
+
+func (idm IntDecMode) valid() bool {
+	return idm >= 0 && idm < maxIntDec
+}
+
+// MapKeyByteStringMode specifies how to decode CBOR byte string (major type 2)
+// as Go map key when decoding CBOR map key into an empty Go interface value.
+// Specifically, this option applies when decoding CBOR map into
+//   - Go empty interface, or
+//   - Go map with empty interface as key type.
+// The CBOR map key types handled by this option are
+//   - byte string
+//   - tagged byte string
+//   - nested tagged byte string
+type MapKeyByteStringMode int
+
+const (
+	// MapKeyByteStringAllowed allows CBOR byte string to be decoded as Go map key.
+	// Since Go doesn't allow []byte as map key, CBOR byte string is decoded to
+	// ByteString which has underlying string type.
+	// This is the default setting.
+	MapKeyByteStringAllowed MapKeyByteStringMode = iota
+
+	// MapKeyByteStringForbidden forbids CBOR byte string being decoded as Go map key.
+	// Attempting to decode CBOR byte string as map key into empty interface value
+	// returns a decoding error.
+	MapKeyByteStringForbidden
+
+	maxMapKeyByteStringMode
+)
+
+func (mkbsm MapKeyByteStringMode) valid() bool {
+	return mkbsm >= 0 && mkbsm < maxMapKeyByteStringMode
+}
+
+// ExtraDecErrorCond specifies extra conditions that should be treated as errors.
+type ExtraDecErrorCond uint
+
+// ExtraDecErrorNone indicates no extra error condition.
+const ExtraDecErrorNone ExtraDecErrorCond = 0
+
+const (
+	// ExtraDecErrorUnknownField indicates error condition when destination
+	// Go struct doesn't have a field matching a CBOR map key.
+	ExtraDecErrorUnknownField ExtraDecErrorCond = 1 << iota
+
+	maxExtraDecError
+)
+
+func (ec ExtraDecErrorCond) valid() bool {
+	return ec < maxExtraDecError
+}
+
+// UTF8Mode option specifies if decoder should
+// decode CBOR Text containing invalid UTF-8 string.
+type UTF8Mode int
+
+const (
+	// UTF8RejectInvalid rejects CBOR Text containing
+	// invalid UTF-8 string.
+	UTF8RejectInvalid UTF8Mode = iota
+
+	// UTF8DecodeInvalid allows decoding CBOR Text containing
+	// invalid UTF-8 string.
+ UTF8DecodeInvalid + + maxUTF8Mode +) + +func (um UTF8Mode) valid() bool { + return um >= 0 && um < maxUTF8Mode +} + +// FieldNameMatchingMode specifies how string keys in CBOR maps are matched to Go struct field names. +type FieldNameMatchingMode int + +const ( + // FieldNameMatchingPreferCaseSensitive prefers to decode map items into struct fields whose names (or tag + // names) exactly match the item's key. If there is no such field, a map item will be decoded into a field whose + // name is a case-insensitive match for the item's key. + FieldNameMatchingPreferCaseSensitive FieldNameMatchingMode = iota + + // FieldNameMatchingCaseSensitive decodes map items only into a struct field whose name (or tag name) is an + // exact match for the item's key. + FieldNameMatchingCaseSensitive + + maxFieldNameMatchingMode +) + +func (fnmm FieldNameMatchingMode) valid() bool { + return fnmm >= 0 && fnmm < maxFieldNameMatchingMode +} + +// BigIntDecMode specifies how to decode CBOR bignum to Go interface{}. +type BigIntDecMode int + +const ( + // BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int) + // when unmarshalling into a Go interface{}. + BigIntDecodeValue BigIntDecMode = iota + + // BigIntDecodePointer makes CBOR bignum decode to *big.Int when + // unmarshalling into a Go interface{}. + BigIntDecodePointer + + maxBigIntDecMode +) + +func (bidm BigIntDecMode) valid() bool { + return bidm >= 0 && bidm < maxBigIntDecMode +} + +// ByteStringToStringMode specifies the behavior when decoding a CBOR byte string into a Go string. +type ByteStringToStringMode int + +const ( + // ByteStringToStringForbidden generates an error on an attempt to decode a CBOR byte string into a Go string. + ByteStringToStringForbidden ByteStringToStringMode = iota + + // ByteStringToStringAllowed permits decoding a CBOR byte string into a Go string. + ByteStringToStringAllowed + + // ByteStringToStringAllowedWithExpectedLaterEncoding permits decoding a CBOR byte string + // into a Go string. Also, if the byte string is enclosed (directly or indirectly) by one of + // the "expected later encoding" tags (numbers 21 through 23), the destination string will + // be populated by applying the designated text encoding to the contents of the input byte + // string. + ByteStringToStringAllowedWithExpectedLaterEncoding + + maxByteStringToStringMode +) + +func (bstsm ByteStringToStringMode) valid() bool { + return bstsm >= 0 && bstsm < maxByteStringToStringMode +} + +// FieldNameByteStringMode specifies the behavior when decoding a CBOR byte string map key as a Go struct field name. +type FieldNameByteStringMode int + +const ( + // FieldNameByteStringForbidden generates an error on an attempt to decode a CBOR byte string map key as a Go struct field name. + FieldNameByteStringForbidden FieldNameByteStringMode = iota + + // FieldNameByteStringAllowed permits CBOR byte string map keys to be recognized as Go struct field names. + FieldNameByteStringAllowed + + maxFieldNameByteStringMode +) + +func (fnbsm FieldNameByteStringMode) valid() bool { + return fnbsm >= 0 && fnbsm < maxFieldNameByteStringMode +} + +// UnrecognizedTagToAnyMode specifies how to decode unrecognized CBOR tag into an empty interface (any). +// Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. +type UnrecognizedTagToAnyMode int + +const ( + // UnrecognizedTagNumAndContentToAny decodes CBOR tag number and tag content to cbor.Tag + // when decoding unrecognized CBOR tag into an empty interface. 
+ UnrecognizedTagNumAndContentToAny UnrecognizedTagToAnyMode = iota + + // UnrecognizedTagContentToAny decodes only CBOR tag content (into its default type) + // when decoding unrecognized CBOR tag into an empty interface. + UnrecognizedTagContentToAny + + maxUnrecognizedTagToAny +) + +func (uttam UnrecognizedTagToAnyMode) valid() bool { + return uttam >= 0 && uttam < maxUnrecognizedTagToAny +} + +// TimeTagToAnyMode specifies how to decode CBOR tag 0 and 1 into an empty interface (any). +// Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. +type TimeTagToAnyMode int + +const ( + // TimeTagToTime decodes CBOR tag 0 and 1 into a time.Time value + // when decoding tag 0 or 1 into an empty interface. + TimeTagToTime TimeTagToAnyMode = iota + + // TimeTagToRFC3339 decodes CBOR tag 0 and 1 into a time string in RFC3339 format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339 + + // TimeTagToRFC3339Nano decodes CBOR tag 0 and 1 into a time string in RFC3339Nano format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339Nano + + maxTimeTagToAnyMode +) + +func (tttam TimeTagToAnyMode) valid() bool { + return tttam >= 0 && tttam < maxTimeTagToAnyMode +} + +// SimpleValueRegistry is a registry of unmarshaling behaviors for each possible CBOR simple value +// number (0...23 and 32...255). +type SimpleValueRegistry struct { + rejected [256]bool +} + +// WithRejectedSimpleValue registers the given simple value as rejected. If the simple value is +// encountered in a CBOR input during unmarshaling, an UnacceptableDataItemError is returned. +func WithRejectedSimpleValue(sv SimpleValue) func(*SimpleValueRegistry) error { + return func(r *SimpleValueRegistry) error { + if sv >= 24 && sv <= 31 { + return fmt.Errorf("cbor: cannot set analog for reserved simple value %d", sv) + } + r.rejected[sv] = true + return nil + } +} + +// Creates a new SimpleValueRegistry. The registry state is initialized by executing the provided +// functions in order against a registry that is pre-populated with the defaults for all well-formed +// simple value numbers. +func NewSimpleValueRegistryFromDefaults(fns ...func(*SimpleValueRegistry) error) (*SimpleValueRegistry, error) { + var r SimpleValueRegistry + for _, fn := range fns { + if err := fn(&r); err != nil { + return nil, err + } + } + return &r, nil +} + +// NaNMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing NaN (not-a-number). +type NaNMode int + +const ( + // NaNDecodeAllowed will decode NaN values to Go float32 or float64. + NaNDecodeAllowed NaNMode = iota + + // NaNDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode a NaN value. + NaNDecodeForbidden + + maxNaNDecode +) + +func (ndm NaNMode) valid() bool { + return ndm >= 0 && ndm < maxNaNDecode +} + +// InfMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing positive or negative infinity. +type InfMode int + +const ( + // InfDecodeAllowed will decode infinite values to Go float32 or float64. + InfDecodeAllowed InfMode = iota + + // InfDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode an + // infinite value. 
+ InfDecodeForbidden + + maxInfDecode +) + +func (idm InfMode) valid() bool { + return idm >= 0 && idm < maxInfDecode +} + +// ByteStringToTimeMode specifies the behavior when decoding a CBOR byte string into a Go time.Time. +type ByteStringToTimeMode int + +const ( + // ByteStringToTimeForbidden generates an error on an attempt to decode a CBOR byte string into a Go time.Time. + ByteStringToTimeForbidden ByteStringToTimeMode = iota + + // ByteStringToTimeAllowed permits decoding a CBOR byte string into a Go time.Time. + ByteStringToTimeAllowed + + maxByteStringToTimeMode +) + +func (bttm ByteStringToTimeMode) valid() bool { + return bttm >= 0 && bttm < maxByteStringToTimeMode +} + +// ByteStringExpectedFormatMode specifies how to decode CBOR byte string into Go byte slice +// when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if +// the CBOR byte string does not contain the expected format (e.g. base64) specified. +// For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" +// in RFC 8949 Section 3.4.5.2. +type ByteStringExpectedFormatMode int + +const ( + // ByteStringExpectedFormatNone copies the unmodified CBOR byte string into Go byte slice + // if the byte string is not tagged by CBOR tag 21-23. + ByteStringExpectedFormatNone ByteStringExpectedFormatMode = iota + + // ByteStringExpectedBase64URL expects CBOR byte strings to contain base64url-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64url-encoded bytes into Go slice. + ByteStringExpectedBase64URL + + // ByteStringExpectedBase64 expects CBOR byte strings to contain base64-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64-encoded bytes into Go slice. + ByteStringExpectedBase64 + + // ByteStringExpectedBase16 expects CBOR byte strings to contain base16-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base16-encoded bytes into Go slice. + ByteStringExpectedBase16 + + maxByteStringExpectedFormatMode +) + +func (bsefm ByteStringExpectedFormatMode) valid() bool { + return bsefm >= 0 && bsefm < maxByteStringExpectedFormatMode +} + +// BignumTagMode specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can be +// decoded. +type BignumTagMode int + +const ( + // BignumTagAllowed allows bignum tags to be decoded. + BignumTagAllowed BignumTagMode = iota + + // BignumTagForbidden produces an UnacceptableDataItemError during Unmarshal if a bignum tag + // is encountered in the input. + BignumTagForbidden + + maxBignumTag +) + +func (btm BignumTagMode) valid() bool { + return btm >= 0 && btm < maxBignumTag +} + +// BinaryUnmarshalerMode specifies how to decode into types that implement +// encoding.BinaryUnmarshaler. +type BinaryUnmarshalerMode int + +const ( + // BinaryUnmarshalerByteString will invoke UnmarshalBinary on the contents of a CBOR byte + // string when decoding into a value that implements BinaryUnmarshaler. + BinaryUnmarshalerByteString BinaryUnmarshalerMode = iota + + // BinaryUnmarshalerNone does not recognize BinaryUnmarshaler implementations during decode. + BinaryUnmarshalerNone + + maxBinaryUnmarshalerMode +) + +func (bum BinaryUnmarshalerMode) valid() bool { + return bum >= 0 && bum < maxBinaryUnmarshalerMode +} + +// DecOptions specifies decoding options. +type DecOptions struct { + // DupMapKey specifies whether to enforce duplicate map key. 
+ DupMapKey DupMapKeyMode + + // TimeTag specifies whether or not untagged data items, or tags other + // than tag 0 and tag 1, can be decoded to time.Time. If tag 0 or tag 1 + // appears in an input, the type of its content is always validated as + // specified in RFC 8949. That behavior is not controlled by this + // option. The behavior of the supported modes are: + // + // DecTagIgnored (default): Untagged text strings and text strings + // enclosed in tags other than 0 and 1 are decoded as though enclosed + // in tag 0. Untagged unsigned integers, negative integers, and + // floating-point numbers (or those enclosed in tags other than 0 and + // 1) are decoded as though enclosed in tag 1. Decoding a tag other + // than 0 or 1 enclosing simple values null or undefined into a + // time.Time does not modify the destination value. + // + // DecTagOptional: Untagged text strings are decoded as though + // enclosed in tag 0. Untagged unsigned integers, negative integers, + // and floating-point numbers are decoded as though enclosed in tag + // 1. Tags other than 0 and 1 will produce an error on attempts to + // decode them into a time.Time. + // + // DecTagRequired: Only tags 0 and 1 can be decoded to time.Time. Any + // other input will produce an error. + TimeTag DecTagMode + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // IntDec specifies which Go integer type (int64 or uint64) to use + // when decoding CBOR int (major type 0 and 1) to Go interface{}. + IntDec IntDecMode + + // MapKeyByteString specifies how to decode CBOR byte string as map key + // when decoding CBOR map with byte string key into an empty interface value. + // By default, an error is returned when attempting to decode CBOR byte string + // as map key because Go doesn't allow []byte as map key. + MapKeyByteString MapKeyByteStringMode + + // ExtraReturnErrors specifies extra conditions that should be treated as errors. + ExtraReturnErrors ExtraDecErrorCond + + // DefaultMapType specifies Go map type to create and decode to + // when unmarshalling CBOR into an empty interface value. + // By default, unmarshal uses map[interface{}]interface{}. + DefaultMapType reflect.Type + + // UTF8 specifies if decoder should decode CBOR Text containing invalid UTF-8. + // By default, unmarshal rejects CBOR text containing invalid UTF-8. + UTF8 UTF8Mode + + // FieldNameMatching specifies how string keys in CBOR maps are matched to Go struct field names. + FieldNameMatching FieldNameMatchingMode + + // BigIntDec specifies how to decode CBOR bignum to Go interface{}. + BigIntDec BigIntDecMode + + // DefaultByteStringType is the Go type that should be produced when decoding a CBOR byte + // string into an empty interface value. 
Types to which a []byte is convertible are valid + // for this option, except for array and pointer-to-array types. If nil, the default is + // []byte. + DefaultByteStringType reflect.Type + + // ByteStringToString specifies the behavior when decoding a CBOR byte string into a Go string. + ByteStringToString ByteStringToStringMode + + // FieldNameByteString specifies the behavior when decoding a CBOR byte string map key as a + // Go struct field name. + FieldNameByteString FieldNameByteStringMode + + // UnrecognizedTagToAny specifies how to decode unrecognized CBOR tag into an empty interface. + // Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. + UnrecognizedTagToAny UnrecognizedTagToAnyMode + + // TimeTagToAny specifies how to decode CBOR tag 0 and 1 into an empty interface (any). + // Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. + TimeTagToAny TimeTagToAnyMode + + // SimpleValues is an immutable mapping from each CBOR simple value to a corresponding + // unmarshal behavior. If nil, the simple values false, true, null, and undefined are mapped + // to the Go analog values false, true, nil, and nil, respectively, and all other simple + // values N (except the reserved simple values 24 through 31) are mapped to + // cbor.SimpleValue(N). In other words, all well-formed simple values can be decoded. + // + // Users may provide a custom SimpleValueRegistry constructed via + // NewSimpleValueRegistryFromDefaults. + SimpleValues *SimpleValueRegistry + + // NaN specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing NaN (not-a-number). + NaN NaNMode + + // Inf specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing positive or negative infinity. + Inf InfMode + + // ByteStringToTime specifies how to decode CBOR byte string into Go time.Time. + ByteStringToTime ByteStringToTimeMode + + // ByteStringExpectedFormat specifies how to decode CBOR byte string into Go byte slice + // when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if + // the CBOR byte string does not contain the expected format (e.g. base64) specified. + // For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" + // in RFC 8949 Section 3.4.5.2. + ByteStringExpectedFormat ByteStringExpectedFormatMode + + // BignumTag specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can + // be decoded. Unlike BigIntDec, this option applies to all bignum tags encountered in a + // CBOR input, independent of the type of the destination value of a particular Unmarshal + // operation. + BignumTag BignumTagMode + + // BinaryUnmarshaler specifies how to decode into types that implement + // encoding.BinaryUnmarshaler. + BinaryUnmarshaler BinaryUnmarshalerMode +} + +// DecMode returns DecMode with immutable options and no tags (safe for concurrency). +func (opts DecOptions) DecMode() (DecMode, error) { //nolint:gocritic // ignore hugeParam + return opts.decMode() +} + +// validForTags checks that the provided tag set is compatible with these options and returns a +// non-nil error if and only if the provided tag set is incompatible. 
+func (opts DecOptions) validForTags(tags TagSet) error { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return errors.New("cbor: cannot create DecMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return errors.New("cbor: cannot create DecMode with nil value as TagSet") + } + if opts.ByteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + opts.ByteStringExpectedFormat != ByteStringExpectedFormatNone { + for _, tagNum := range []uint64{ + tagNumExpectedLaterEncodingBase64URL, + tagNumExpectedLaterEncodingBase64, + tagNumExpectedLaterEncodingBase16, + } { + if rt := tags.getTypeFromTagNum([]uint64{tagNum}); rt != nil { + return fmt.Errorf("cbor: DecMode with non-default StringExpectedEncoding or ByteSliceExpectedEncoding treats tag %d as built-in and conflicts with the provided TagSet's registration of %v", tagNum, rt) + } + } + + } + return nil +} + +// DecModeWithTags returns DecMode with options and tags that are both immutable (safe for concurrency). +func (opts DecOptions) DecModeWithTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.DecTag != DecTagIgnored { + ts[contentType] = tag + } + } + syncTags.RUnlock() + + if len(ts) > 0 { + dm.tags = ts + } + + return dm, nil +} + +// DecModeWithSharedTags returns DecMode with immutable options and mutable shared tags (safe for concurrency). +func (opts DecOptions) DecModeWithSharedTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + dm.tags = tags + return dm, nil +} + +const ( + defaultMaxArrayElements = 131072 + minMaxArrayElements = 16 + maxMaxArrayElements = 2147483647 + + defaultMaxMapPairs = 131072 + minMaxMapPairs = 16 + maxMaxMapPairs = 2147483647 + + defaultMaxNestedLevels = 32 + minMaxNestedLevels = 4 + maxMaxNestedLevels = 65535 +) + +var defaultSimpleValues = func() *SimpleValueRegistry { + registry, err := NewSimpleValueRegistryFromDefaults() + if err != nil { + panic(err) + } + return registry +}() + +//nolint:gocyclo // Each option comes with some manageable boilerplate +func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.DupMapKey.valid() { + return nil, errors.New("cbor: invalid DupMapKey " + strconv.Itoa(int(opts.DupMapKey))) + } + + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + + if !opts.IntDec.valid() { + return nil, errors.New("cbor: invalid IntDec " + strconv.Itoa(int(opts.IntDec))) + } + + if !opts.MapKeyByteString.valid() { + return nil, errors.New("cbor: invalid MapKeyByteString " + strconv.Itoa(int(opts.MapKeyByteString))) + } + + if opts.MaxNestedLevels == 0 { + opts.MaxNestedLevels = defaultMaxNestedLevels + } else if opts.MaxNestedLevels < minMaxNestedLevels || opts.MaxNestedLevels 
> maxMaxNestedLevels { + return nil, errors.New("cbor: invalid MaxNestedLevels " + strconv.Itoa(opts.MaxNestedLevels) + + " (range is [" + strconv.Itoa(minMaxNestedLevels) + ", " + strconv.Itoa(maxMaxNestedLevels) + "])") + } + + if opts.MaxArrayElements == 0 { + opts.MaxArrayElements = defaultMaxArrayElements + } else if opts.MaxArrayElements < minMaxArrayElements || opts.MaxArrayElements > maxMaxArrayElements { + return nil, errors.New("cbor: invalid MaxArrayElements " + strconv.Itoa(opts.MaxArrayElements) + + " (range is [" + strconv.Itoa(minMaxArrayElements) + ", " + strconv.Itoa(maxMaxArrayElements) + "])") + } + + if opts.MaxMapPairs == 0 { + opts.MaxMapPairs = defaultMaxMapPairs + } else if opts.MaxMapPairs < minMaxMapPairs || opts.MaxMapPairs > maxMaxMapPairs { + return nil, errors.New("cbor: invalid MaxMapPairs " + strconv.Itoa(opts.MaxMapPairs) + + " (range is [" + strconv.Itoa(minMaxMapPairs) + ", " + strconv.Itoa(maxMaxMapPairs) + "])") + } + + if !opts.ExtraReturnErrors.valid() { + return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) + } + + if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map { + return nil, fmt.Errorf("cbor: invalid DefaultMapType %s", opts.DefaultMapType) + } + + if !opts.UTF8.valid() { + return nil, errors.New("cbor: invalid UTF8 " + strconv.Itoa(int(opts.UTF8))) + } + + if !opts.FieldNameMatching.valid() { + return nil, errors.New("cbor: invalid FieldNameMatching " + strconv.Itoa(int(opts.FieldNameMatching))) + } + + if !opts.BigIntDec.valid() { + return nil, errors.New("cbor: invalid BigIntDec " + strconv.Itoa(int(opts.BigIntDec))) + } + + if opts.DefaultByteStringType != nil && + opts.DefaultByteStringType.Kind() != reflect.String && + (opts.DefaultByteStringType.Kind() != reflect.Slice || opts.DefaultByteStringType.Elem().Kind() != reflect.Uint8) { + return nil, fmt.Errorf("cbor: invalid DefaultByteStringType: %s is not of kind string or []uint8", opts.DefaultByteStringType) + } + + if !opts.ByteStringToString.valid() { + return nil, errors.New("cbor: invalid ByteStringToString " + strconv.Itoa(int(opts.ByteStringToString))) + } + + if !opts.FieldNameByteString.valid() { + return nil, errors.New("cbor: invalid FieldNameByteString " + strconv.Itoa(int(opts.FieldNameByteString))) + } + + if !opts.UnrecognizedTagToAny.valid() { + return nil, errors.New("cbor: invalid UnrecognizedTagToAnyMode " + strconv.Itoa(int(opts.UnrecognizedTagToAny))) + } + simpleValues := opts.SimpleValues + if simpleValues == nil { + simpleValues = defaultSimpleValues + } + + if !opts.TimeTagToAny.valid() { + return nil, errors.New("cbor: invalid TimeTagToAny " + strconv.Itoa(int(opts.TimeTagToAny))) + } + + if !opts.NaN.valid() { + return nil, errors.New("cbor: invalid NaNDec " + strconv.Itoa(int(opts.NaN))) + } + + if !opts.Inf.valid() { + return nil, errors.New("cbor: invalid InfDec " + strconv.Itoa(int(opts.Inf))) + } + + if !opts.ByteStringToTime.valid() { + return nil, errors.New("cbor: invalid ByteStringToTime " + strconv.Itoa(int(opts.ByteStringToTime))) + } + + if !opts.ByteStringExpectedFormat.valid() { + return nil, errors.New("cbor: invalid ByteStringExpectedFormat " + strconv.Itoa(int(opts.ByteStringExpectedFormat))) + } + + if !opts.BignumTag.valid() { + return nil, errors.New("cbor: invalid BignumTag " + strconv.Itoa(int(opts.BignumTag))) + } + + if !opts.BinaryUnmarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler))) + } + + 
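+	// All options have been validated and zero values replaced with defaults
+	// at this point, so the decMode constructed below is internally
+	// consistent. As a caller-side sketch (illustration only, not upstream
+	// code), this function is normally reached via DecOptions.DecMode:
+	//
+	//	dm, err := cbor.DecOptions{
+	//		MaxNestedLevels: 16, // tighter than the default of 32
+	//		DupMapKey:       cbor.DupMapKeyEnforcedAPF,
+	//	}.DecMode()
+	//	if err != nil {
+	//		// an option value was out of range
+	//	}
+	//	err = dm.Unmarshal(data, &v)
+	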
dm := decMode{ + dupMapKey: opts.DupMapKey, + timeTag: opts.TimeTag, + maxNestedLevels: opts.MaxNestedLevels, + maxArrayElements: opts.MaxArrayElements, + maxMapPairs: opts.MaxMapPairs, + indefLength: opts.IndefLength, + tagsMd: opts.TagsMd, + intDec: opts.IntDec, + mapKeyByteString: opts.MapKeyByteString, + extraReturnErrors: opts.ExtraReturnErrors, + defaultMapType: opts.DefaultMapType, + utf8: opts.UTF8, + fieldNameMatching: opts.FieldNameMatching, + bigIntDec: opts.BigIntDec, + defaultByteStringType: opts.DefaultByteStringType, + byteStringToString: opts.ByteStringToString, + fieldNameByteString: opts.FieldNameByteString, + unrecognizedTagToAny: opts.UnrecognizedTagToAny, + timeTagToAny: opts.TimeTagToAny, + simpleValues: simpleValues, + nanDec: opts.NaN, + infDec: opts.Inf, + byteStringToTime: opts.ByteStringToTime, + byteStringExpectedFormat: opts.ByteStringExpectedFormat, + bignumTag: opts.BignumTag, + binaryUnmarshaler: opts.BinaryUnmarshaler, + } + + return &dm, nil +} + +// DecMode is the main interface for CBOR decoding. +type DecMode interface { + // Unmarshal parses the CBOR-encoded data into the value pointed to by v + // using the decoding mode. If v is nil, not a pointer, or a nil pointer, + // Unmarshal returns an error. + // + // See the documentation for Unmarshal for details. + Unmarshal(data []byte, v interface{}) error + + // UnmarshalFirst parses the first CBOR data item into the value pointed to by v + // using the decoding mode. Any remaining bytes are returned in rest. + // + // If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. + // + // See the documentation for Unmarshal for details. + UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) + + // Valid checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + // + // WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) + // and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". + // + // Deprecated: Valid is kept for compatibility and should not be used. + // Use Wellformed instead because it has a more appropriate name. + Valid(data []byte) error + + // Wellformed checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + Wellformed(data []byte) error + + // NewDecoder returns a new decoder that reads from r using dm DecMode. + NewDecoder(r io.Reader) *Decoder + + // DecOptions returns user specified options used to create this DecMode. 
+ DecOptions() DecOptions +} + +type decMode struct { + tags tagProvider + dupMapKey DupMapKeyMode + timeTag DecTagMode + maxNestedLevels int + maxArrayElements int + maxMapPairs int + indefLength IndefLengthMode + tagsMd TagsMode + intDec IntDecMode + mapKeyByteString MapKeyByteStringMode + extraReturnErrors ExtraDecErrorCond + defaultMapType reflect.Type + utf8 UTF8Mode + fieldNameMatching FieldNameMatchingMode + bigIntDec BigIntDecMode + defaultByteStringType reflect.Type + byteStringToString ByteStringToStringMode + fieldNameByteString FieldNameByteStringMode + unrecognizedTagToAny UnrecognizedTagToAnyMode + timeTagToAny TimeTagToAnyMode + simpleValues *SimpleValueRegistry + nanDec NaNMode + infDec InfMode + byteStringToTime ByteStringToTimeMode + byteStringExpectedFormat ByteStringExpectedFormatMode + bignumTag BignumTagMode + binaryUnmarshaler BinaryUnmarshalerMode +} + +var defaultDecMode, _ = DecOptions{}.decMode() + +// DecOptions returns user specified options used to create this DecMode. +func (dm *decMode) DecOptions() DecOptions { + simpleValues := dm.simpleValues + if simpleValues == defaultSimpleValues { + // Users can't explicitly set this to defaultSimpleValues. It must have been nil in + // the original DecOptions. + simpleValues = nil + } + + return DecOptions{ + DupMapKey: dm.dupMapKey, + TimeTag: dm.timeTag, + MaxNestedLevels: dm.maxNestedLevels, + MaxArrayElements: dm.maxArrayElements, + MaxMapPairs: dm.maxMapPairs, + IndefLength: dm.indefLength, + TagsMd: dm.tagsMd, + IntDec: dm.intDec, + MapKeyByteString: dm.mapKeyByteString, + ExtraReturnErrors: dm.extraReturnErrors, + DefaultMapType: dm.defaultMapType, + UTF8: dm.utf8, + FieldNameMatching: dm.fieldNameMatching, + BigIntDec: dm.bigIntDec, + DefaultByteStringType: dm.defaultByteStringType, + ByteStringToString: dm.byteStringToString, + FieldNameByteString: dm.fieldNameByteString, + UnrecognizedTagToAny: dm.unrecognizedTagToAny, + TimeTagToAny: dm.timeTagToAny, + SimpleValues: simpleValues, + NaN: dm.nanDec, + Inf: dm.infDec, + ByteStringToTime: dm.byteStringToTime, + ByteStringExpectedFormat: dm.byteStringExpectedFormat, + BignumTag: dm.bignumTag, + BinaryUnmarshaler: dm.binaryUnmarshaler, + } +} + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using dm decoding mode. If v is nil, not a pointer, or a nil pointer, +// Unmarshal returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) Unmarshal(data []byte, v interface{}) error { + d := decoder{data: data, dm: dm} + + // Check well-formedness. + off := d.off // Save offset before data validation + err := d.wellformed(false, false) // don't allow any extra data after valid data item. + d.off = off // Restore offset + if err != nil { + return err + } + + return d.value(v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using dm decoding mode. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + d := decoder{data: data, dm: dm} + + // check well-formedness. + off := d.off // Save offset before data validation + err = d.wellformed(true, false) // allow extra data after well-formed data item + d.off = off // Restore offset + + // If it is well-formed, parse the value. 
This is structured like this to allow
+	// better test coverage
+	if err == nil {
+		err = d.value(v)
+	}
+
+	// If either wellformed or value returned an error, do not return rest bytes
+	if err != nil {
+		return nil, err
+	}
+
+	// Return the rest of the data slice (which might be len 0)
+	return d.data[d.off:], nil
+}
+
+// Valid checks whether data is a well-formed encoded CBOR data item and
+// that it complies with configurable restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+//
+// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity)
+// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed".
+//
+// Deprecated: Valid is kept for compatibility and should not be used.
+// Use Wellformed instead because it has a more appropriate name.
+func (dm *decMode) Valid(data []byte) error {
+	return dm.Wellformed(data)
+}
+
+// Wellformed checks whether data is a well-formed encoded CBOR data item and
+// that it complies with configurable restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+func (dm *decMode) Wellformed(data []byte) error {
+	d := decoder{data: data, dm: dm}
+	return d.wellformed(false, false)
+}
+
+// NewDecoder returns a new decoder that reads from r using dm DecMode.
+func (dm *decMode) NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r: r, d: decoder{dm: dm}}
+}
+
+type decoder struct {
+	data []byte
+	off  int // next read offset in data
+	dm   *decMode
+
+	// expectedLaterEncodingTags stores a stack of encountered "Expected Later Encoding" tags,
+	// if any.
+	//
+	// The "Expected Later Encoding" tags (21 to 23) are valid for any data item. When decoding
+	// byte strings, the effective encoding comes from the tag nearest to the byte string being
+	// decoded. For example, the effective encoding of the byte string 21(22(h'41')) would be
+	// controlled by tag 22, and in the data item 23([h'42', 22([21(h'43')])]) the effective
+	// encoding of the byte strings h'42' and h'43' would be controlled by tag 23 and 21,
+	// respectively.
+	expectedLaterEncodingTags []uint64
+}
+
+// value decodes CBOR data item into the value pointed to by v.
+// If CBOR data item fails to be decoded into v,
+// error is returned and offset is moved to the next CBOR data item.
+// Precondition: d.data contains at least one well-formed CBOR data item.
+func (d *decoder) value(v interface{}) error {
+	// v can't be nil, non-pointer, or nil pointer value.
+	if v == nil {
+		return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"}
+	}
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"}
+	} else if rv.IsNil() {
+		return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"}
+	}
+	rv = rv.Elem()
+	return d.parseToValue(rv, getTypeInfo(rv.Type()))
+}
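+
+// A minimal streaming sketch (illustration only, not upstream code; it
+// assumes the Decoder.Decode method defined in this package's stream.go):
+// NewDecoder above decodes consecutive CBOR data items from an io.Reader.
+//
+//	dm, _ := cbor.DecOptions{}.DecMode()
+//	dec := dm.NewDecoder(r) // r is an io.Reader yielding CBOR data items
+//	for {
+//		var item interface{}
+//		if err := dec.Decode(&item); err != nil {
+//			break // io.EOF once the stream is exhausted
+//		}
+//		// use item
+//	}
+
+// parseToValue decodes CBOR data to value. It assumes data is well-formed,
+// and does not perform bounds checking.
+func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
+
+	// Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil.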
+ if d.nextCBORNil() && v.Kind() == reflect.Ptr { + d.skip() + v.Set(reflect.Zero(v.Type())) + return nil + } + + if tInfo.spclType == specialTypeIface { + if !v.IsNil() { + // Use value type + v = v.Elem() + tInfo = getTypeInfo(v.Type()) + } else { //nolint:gocritic + // Create and use registered type if CBOR data is registered tag + if d.dm.tags != nil && d.nextCBORType() == cborTypeTag { + + off := d.off + var tagNums []uint64 + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + tagNums = append(tagNums, tagNum) + } + d.off = off + + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + if registeredType.Implements(tInfo.nonPtrType) || + reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) { + v.Set(reflect.New(registeredType)) + v = v.Elem() + tInfo = getTypeInfo(registeredType) + } + } + } + } + } + + // Create new value for the pointer v to point to. + // At this point, CBOR value is not nil/undefined if v is a pointer. + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + d.skip() + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + + // Strip self-described CBOR tag number. + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return err + } + } + d.off = off + + if tInfo.spclType != specialTypeNone { + switch tInfo.spclType { + case specialTypeEmptyIface: + iv, err := d.parse(false) // Skipped self-described CBOR tag number already. + if iv != nil { + v.Set(reflect.ValueOf(iv)) + } + return err + + case specialTypeTag: + return d.parseToTag(v) + + case specialTypeTime: + if d.nextCBORNil() { + // Decoding CBOR null and undefined to time.Time is no-op. + d.skip() + return nil + } + tm, ok, err := d.parseToTime() + if err != nil { + return err + } + if ok { + v.Set(reflect.ValueOf(tm)) + } + return nil + + case specialTypeUnmarshalerIface: + return d.parseToUnmarshaler(v) + } + } + + // Check registered tag number + if tagItem := d.getRegisteredTagItem(tInfo.nonPtrType); tagItem != nil { + t := d.nextCBORType() + if t != cborTypeTag { + if tagItem.opts.DecTag == DecTagRequired { + d.skip() // Required tag number is absent, skip entire tag + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.typ.String(), + errorMsg: "expect CBOR tag value"} + } + } else if err := d.validRegisteredTagNums(tagItem); err != nil { + d.skip() // Skip tag content + return err + } + } + + t := d.nextCBORType() + + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + return fillPositiveInt(t, val, v) + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + nValue := int64(-1) ^ int64(val) + return fillNegativeInt(t, nValue, v) + + case cborTypeByteString: + b, copied := d.parseByteString() + b, converted, err := d.applyByteStringTextConversion(b, v.Type()) + if err != nil { + return err + } + copied = copied || converted + return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler) + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return err + } + return fillTextString(t, b, v) + + case cborTypePrimitives: + _, ai, val := d.getHead() + switch ai { + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return fillFloat(t, f, v) + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return fillFloat(t, f, v) + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return fillFloat(t, f, v) + + default: // ai <= 24 + if d.dm.simpleValues.rejected[SimpleValue(val)] { + return &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return fillBool(t, ai == additionalInformationAsTrue, v) + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return fillNil(t, v) + + default: + return fillPositiveInt(t, val, v) + } + } + + case cborTypeTag: + _, _, tagNum := d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + // Bignum (tag 2) can be decoded to uint, int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsUint64() { + return fillPositiveInt(t, bi.Uint64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumNegativeBignum: + // Bignum (tag 3) can be decoded to int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsInt64() { + return fillNegativeInt(t, bi.Int64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. 
+ if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + } + } + + return d.parseToValue(v, tInfo) + + case cborTypeArray: + if tInfo.nonPtrKind == reflect.Slice { + return d.parseArrayToSlice(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Array { + return d.parseArrayToArray(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Struct { + return d.parseArrayToStruct(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + + case cborTypeMap: + if tInfo.nonPtrKind == reflect.Struct { + return d.parseMapToStruct(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Map { + return d.parseMapToMap(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + } + + return nil +} + +func (d *decoder) parseToTag(v reflect.Value) error { + if d.nextCBORNil() { + // Decoding CBOR null and undefined to cbor.Tag is no-op. + d.skip() + return nil + } + + t := d.nextCBORType() + if t != cborTypeTag { + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: typeTag.String()} + } + + // Unmarshal tag number + _, _, num := d.getHead() + + // Unmarshal tag content + content, err := d.parse(false) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(Tag{num, content})) + return nil +} + +// parseToTime decodes the current data item as a time.Time. The bool return value is false if and +// only if the destination value should remain unmodified. +func (d *decoder) parseToTime() (time.Time, bool, error) { + // Verify that tag number or absence of tag number is acceptable to specified timeTag. 
+ if t := d.nextCBORType(); t == cborTypeTag { + if d.dm.timeTag == DecTagIgnored { + // Skip all enclosing tags + for t == cborTypeTag { + d.getHead() + t = d.nextCBORType() + } + if d.nextCBORNil() { + d.skip() + return time.Time{}, false, nil + } + } else { + // Read tag number + _, _, tagNum := d.getHead() + if tagNum != 0 && tagNum != 1 { + d.skip() // skip tag content + return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") + } + } + } else { + if d.dm.timeTag == DecTagRequired { + d.skip() + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String(), errorMsg: "expect CBOR tag value"} + } + } + + switch t := d.nextCBORType(); t { + case cborTypeByteString: + if d.dm.byteStringToTime == ByteStringToTimeAllowed { + b, _ := d.parseByteString() + t, err := time.Parse(time.RFC3339, string(b)) + if err != nil { + return time.Time{}, false, fmt.Errorf("cbor: cannot set %q for time.Time: %w", string(b), err) + } + return t, true, nil + } + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + + case cborTypeTextString: + s, err := d.parseTextString() + if err != nil { + return time.Time{}, false, err + } + t, err := time.Parse(time.RFC3339, string(s)) + if err != nil { + return time.Time{}, false, errors.New("cbor: cannot set " + string(s) + " for time.Time: " + err.Error()) + } + return t, true, nil + + case cborTypePositiveInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("%d overflows Go's int64", val), + } + } + return time.Unix(int64(val), 0), true, nil + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + if val == math.MaxUint64 { + // Maximum absolute value representable by negative integer is 2^64, + // not 2^64-1, so it overflows uint64. + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: "-18446744073709551616 overflows Go's int64", + } + } + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("-%d overflows Go's int64", val+1), + } + } + return time.Unix(int64(-1)^int64(val), 0), true, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + var f float64 + switch ai { + case additionalInformationAsFloat16: + f = float64(float16.Frombits(uint16(val)).Float32()) + + case additionalInformationAsFloat32: + f = float64(math.Float32frombits(uint32(val))) + + case additionalInformationAsFloat64: + f = math.Float64frombits(val) + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } + + if math.IsNaN(f) || math.IsInf(f, 0) { + // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.2-6 + return time.Time{}, true, nil + } + seconds, fractional := math.Modf(f) + return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } +} + +// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface. +// It assumes data is well-formed, and does not perform bounds checking. 
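+//
+// As a caller-side sketch (illustration only, not upstream code; the type
+// name rawMessage is arbitrary), an Unmarshaler implementation that this
+// function would dispatch to could look like:
+//
+//	type rawMessage struct{ data []byte }
+//
+//	// UnmarshalCBOR copies its input because Unmarshaler implementations
+//	// must copy the CBOR data if they use it after returning.
+//	func (m *rawMessage) UnmarshalCBOR(data []byte) error {
+//		m.data = append([]byte(nil), data...)
+//		return nil
+//	}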
+func (d *decoder) parseToUnmarshaler(v reflect.Value) error { + if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() { + d.skip() + return nil + } + + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + if u, ok := v.Interface().(Unmarshaler); ok { + start := d.off + d.skip() + return u.UnmarshalCBOR(d.data[start:d.off]) + } + d.skip() + return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler") +} + +// parse parses CBOR data and returns value in default Go type. +// It assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo + // Strip self-described CBOR tag number. + if skipSelfDescribedTag { + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return nil, err + } + } + d.off = off + + t := d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + + switch d.dm.intDec { + case IntDecConvertNone: + return val, nil + + case IntDecConvertSigned, IntDecConvertSignedOrFail: + if val > math.MaxInt64 { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + + return int64(val), nil + + case IntDecConvertSignedOrBigInt: + if val > math.MaxInt64 { + bi := new(big.Int).SetUint64(val) + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + return int64(val), nil + + default: + // not reachable + } + + case cborTypeNegativeInt: + _, _, val := d.getHead() + + if val > math.MaxInt64 { + // CBOR negative integer value overflows Go int64, use big.Int instead. + bi := new(big.Int).SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.intDec == IntDecConvertSignedOrFail { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + nValue := int64(-1) ^ int64(val) + return nValue, nil + + case cborTypeByteString: + b, copied := d.parseByteString() + var effectiveByteStringType = d.dm.defaultByteStringType + if effectiveByteStringType == nil { + effectiveByteStringType = typeByteSlice + } + b, converted, err := d.applyByteStringTextConversion(b, effectiveByteStringType) + if err != nil { + return nil, err + } + copied = copied || converted + + switch effectiveByteStringType { + case typeByteSlice: + if copied { + return b, nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return clone, nil + + case typeString: + return string(b), nil + + default: + if copied || d.dm.defaultByteStringType.Kind() == reflect.String { + // Avoid an unnecessary copy since the conversion to string must + // copy the underlying bytes. 
+ return reflect.ValueOf(b).Convert(d.dm.defaultByteStringType).Interface(), nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return reflect.ValueOf(clone).Convert(d.dm.defaultByteStringType).Interface(), nil + } + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return nil, err + } + return string(b), nil + + case cborTypeTag: + tagOff := d.off + _, _, tagNum := d.getHead() + contentOff := d.off + + switch tagNum { + case tagNumRFC3339Time, tagNumEpochTime: + d.off = tagOff + tm, _, err := d.parseToTime() + if err != nil { + return nil, err + } + + switch d.dm.timeTagToAny { + case TimeTagToTime: + return tm, nil + + case TimeTagToRFC3339: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format. E.g. year cannot exceed 9999, etc. + text, err := tm.Truncate(time.Second).MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format: %v", err) + } + return string(text), nil + + case TimeTagToRFC3339Nano: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format with sub-second precision. + text, err := tm.MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format with sub-second precision: %v", err) + } + return string(text), nil + + default: + // not reachable + } + + case tagNumUnsignedBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumNegativeBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. + if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + return d.parse(false) + } + } + + if d.dm.tags != nil { + // Parse to specified type if tag number is registered. 
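+		// A type may be registered under a chain of nested tag numbers, so
+		// collect the full chain before looking up the registered type.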
+ tagNums := []uint64{tagNum} + for d.nextCBORType() == cborTypeTag { + _, _, num := d.getHead() + tagNums = append(tagNums, num) + } + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + d.off = tagOff + rv := reflect.New(registeredType) + if err := d.parseToValue(rv.Elem(), getTypeInfo(registeredType)); err != nil { + return nil, err + } + return rv.Elem().Interface(), nil + } + } + + // Parse tag content + d.off = contentOff + content, err := d.parse(false) + if err != nil { + return nil, err + } + if d.dm.unrecognizedTagToAny == UnrecognizedTagContentToAny { + return content, nil + } + return Tag{tagNum, content}, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { + return nil, &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + if ai < 20 || ai == 24 { + return SimpleValue(val), nil + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return (ai == additionalInformationAsTrue), nil + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return nil, nil + + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return f, nil + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return f, nil + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return f, nil + } + + case cborTypeArray: + return d.parseArray() + + case cborTypeMap: + if d.dm.defaultMapType != nil { + m := reflect.New(d.dm.defaultMapType) + err := d.parseToValue(m, getTypeInfo(m.Elem().Type())) + if err != nil { + return nil, err + } + return m.Elem().Interface(), nil + } + return d.parseMap() + } + + return nil, nil +} + +// parseByteString parses a CBOR encoded byte string. The returned byte slice +// may be backed directly by the input. The second return value will be true if +// and only if the slice is backed by a copy of the input. Callers are +// responsible for making a copy if necessary. +func (d *decoder) parseByteString() ([]byte, bool) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + return b, false + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + b = append(b, d.data[d.off:d.off+int(val)]...) + d.off += int(val) + } + return b, true +} + +// applyByteStringTextConversion converts bytes read from a byte string to or from a configured text +// encoding. If no transformation was performed (because it was not required), the original byte +// slice is returned and the bool return value is false. Otherwise, a new slice containing the +// converted bytes is returned along with the bool value true. 
+func (d *decoder) applyByteStringTextConversion(
+	src []byte,
+	dstType reflect.Type,
+) (
+	dst []byte,
+	transformed bool,
+	err error,
+) {
+	switch dstType.Kind() {
+	case reflect.String:
+		if d.dm.byteStringToString != ByteStringToStringAllowedWithExpectedLaterEncoding || len(d.expectedLaterEncodingTags) == 0 {
+			return src, false, nil
+		}
+
+		switch d.expectedLaterEncodingTags[len(d.expectedLaterEncodingTags)-1] {
+		case tagNumExpectedLaterEncodingBase64URL:
+			encoded := make([]byte, base64.RawURLEncoding.EncodedLen(len(src)))
+			base64.RawURLEncoding.Encode(encoded, src)
+			return encoded, true, nil
+
+		case tagNumExpectedLaterEncodingBase64:
+			encoded := make([]byte, base64.StdEncoding.EncodedLen(len(src)))
+			base64.StdEncoding.Encode(encoded, src)
+			return encoded, true, nil
+
+		case tagNumExpectedLaterEncodingBase16:
+			encoded := make([]byte, hex.EncodedLen(len(src)))
+			hex.Encode(encoded, src)
+			return encoded, true, nil
+
+		default:
+			// If this happens, there is a bug: the decoder has pushed an invalid
+			// "expected later encoding" tag to the stack.
+			panic(fmt.Sprintf("unrecognized expected later encoding tag: %d", d.expectedLaterEncodingTags))
+		}
+
+	case reflect.Slice:
+		if dstType.Elem().Kind() != reflect.Uint8 || len(d.expectedLaterEncodingTags) > 0 {
+			// Either the destination is not a slice of bytes, or the encoder that
+			// produced the input indicated an expected text encoding tag and therefore
+			// the content of the byte string has NOT been text encoded.
+			return src, false, nil
+		}
+
+		switch d.dm.byteStringExpectedFormat {
+		case ByteStringExpectedBase64URL:
+			decoded := make([]byte, base64.RawURLEncoding.DecodedLen(len(src)))
+			n, err := base64.RawURLEncoding.Decode(decoded, src)
+			if err != nil {
+				return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64URL, err)
+			}
+			return decoded[:n], true, nil
+
+		case ByteStringExpectedBase64:
+			decoded := make([]byte, base64.StdEncoding.DecodedLen(len(src)))
+			n, err := base64.StdEncoding.Decode(decoded, src)
+			if err != nil {
+				return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64, err)
+			}
+			return decoded[:n], true, nil
+
+		case ByteStringExpectedBase16:
+			decoded := make([]byte, hex.DecodedLen(len(src)))
+			n, err := hex.Decode(decoded, src)
+			if err != nil {
+				return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase16, err)
+			}
+			return decoded[:n], true, nil
+		}
+	}
+
+	return src, false, nil
+}
+
+// parseTextString parses a CBOR-encoded text string. It returns a byte slice
+// to avoid creating an extra copy of the string. The caller should convert the
+// returned byte slice to a string when needed.
+func (d *decoder) parseTextString() ([]byte, error) {
+	_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
+	if !indefiniteLength {
+		b := d.data[d.off : d.off+int(val)]
+		d.off += int(val)
+		if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) {
+			return nil, &SemanticError{"cbor: invalid UTF-8 string"}
+		}
+		return b, nil
+	}
+	// Process indefinite length string chunks.
+	b := []byte{}
+	for !d.foundBreak() {
+		_, _, val = d.getHead()
+		x := d.data[d.off : d.off+int(val)]
+		d.off += int(val)
+		if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) {
+			for !d.foundBreak() {
+				d.skip() // Skip remaining chunk on error
+			}
+			return nil, &SemanticError{"cbor: invalid UTF-8 string"}
+		}
+		b = append(b, x...)
+ } + return b, nil +} + +func (d *decoder) parseArray() ([]interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + v := make([]interface{}, count) + var e interface{} + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + v[i] = e + } + return v, err +} + +func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + if v.IsNil() || v.Cap() < count || count == 0 { + v.Set(reflect.MakeSlice(tInfo.nonPtrType, count, count)) + } + v.SetLen(count) + var err error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if lastErr := d.parseToValue(v.Index(i), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + } + return err +} + +func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + gi := 0 + vLen := v.Len() + var err error + for ci := 0; (hasSize && ci < count) || (!hasSize && !d.foundBreak()); ci++ { + if gi < vLen { + // Read CBOR array element and set array element + if lastErr := d.parseToValue(v.Index(gi), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + gi++ + } else { + d.skip() // Skip remaining CBOR array element + } + } + // Set remaining Go array elements to zero values. + if gi < vLen { + zeroV := reflect.Zero(tInfo.elemTypeInfo.typ) + for ; gi < vLen; gi++ { + v.Index(gi).Set(zeroV) + } + } + return err +} + +func (d *decoder) parseMap() (interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + m := make(map[interface{}]interface{}) + var k, e interface{} + var err, lastErr error + keyCount := 0 + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if k, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + rv := reflect.ValueOf(k) + if !isHashableValue(rv) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + k, converted = convertByteSliceToByteString(k) + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{rv.Type().String()} + } + d.skip() + continue + } + } + + // Parse CBOR map value. + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + m[k] = e + + // Detect duplicate map key. 
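+		// APF = "allow partial fill": entries decoded before the duplicate are
+		// kept, the duplicate key's value is reset to nil, and the remaining
+		// entries are skipped before returning DupMapKeyError.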
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := len(m) + if newKeyCount == keyCount { + m[k] = nil + err = &DupMapKeyError{k, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // Skip map key + d.skip() // Skip map value + } + return m, err + } + keyCount = newKeyCount + } + } + return m, err +} + +func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if v.IsNil() { + mapsize := count + if !hasSize { + mapsize = 0 + } + v.Set(reflect.MakeMapWithSize(tInfo.nonPtrType, mapsize)) + } + keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ + reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind) + var keyValue, eleValue, zeroKeyValue, zeroEleValue reflect.Value + keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable. + var err, lastErr error + keyCount := v.Len() + var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + existingKeys = make(map[interface{}]bool, keyCount) + if keyCount > 0 { + vKeys := v.MapKeys() + for i := 0; i < len(vKeys); i++ { + existingKeys[vKeys[i].Interface()] = true + } + } + } + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if !keyValue.IsValid() { + keyValue = reflect.New(keyType).Elem() + } else if !reuseKey { + if !zeroKeyValue.IsValid() { + zeroKeyValue = reflect.Zero(keyType) + } + keyValue.Set(zeroKeyValue) + } + if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + if keyIsInterfaceType && keyValue.Elem().IsValid() { + if !isHashableValue(keyValue.Elem()) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + var k interface{} + k, converted = convertByteSliceToByteString(keyValue.Elem().Interface()) + if converted { + keyValue.Set(reflect.ValueOf(k)) + } + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{keyValue.Elem().Type().String()} + } + d.skip() + continue + } + } + } + + // Parse CBOR map value. + if !eleValue.IsValid() { + eleValue = reflect.New(eleType).Elem() + } else if !reuseEle { + if !zeroEleValue.IsValid() { + zeroEleValue = reflect.Zero(eleType) + } + eleValue.Set(zeroEleValue) + } + if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + v.SetMapIndex(keyValue, eleValue) + + // Detect duplicate map key. 
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := v.Len() + if newKeyCount == keyCount { + kvi := keyValue.Interface() + if !existingKeys[kvi] { + v.SetMapIndex(keyValue, reflect.New(eleType).Elem()) + err = &DupMapKeyError{kvi, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // skip map key + d.skip() // skip map value + } + return err + } + delete(existingKeys, kvi) + } + keyCount = newKeyCount + } + } + return err +} + +func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if !structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR array to struct without toarray option", + } + } + + start := d.off + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size + } + if count != len(structType.fields) { + d.off = start + d.skip() + return &UnmarshalTypeError{ + CBORType: cborTypeArray.String(), + GoType: tInfo.typ.String(), + errorMsg: "cannot decode CBOR array to struct with different number of elements", + } + } + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + f := structType.fields[i] + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.typ.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// parseMapToStruct needs to be fast so gocyclo can be ignored for now. +func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR map to struct with toarray option", + } + } + + var err, lastErr error + + // Get CBOR map size + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + + // Keeps track of matched struct fields + var foundFldIdx []bool + { + const maxStackFields = 128 + if nfields := len(structType.fields); nfields <= maxStackFields { + // For structs with typical field counts, expect that this can be + // stack-allocated. 
+ var a [maxStackFields]bool + foundFldIdx = a[:nfields] + } else { + foundFldIdx = make([]bool, len(structType.fields)) + } + } + + // Keeps track of CBOR map keys to detect duplicate map key + keyCount := 0 + var mapKeys map[interface{}]struct{} + + errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + +MapEntryLoop: + for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + var f *field + + // If duplicate field detection is enabled and the key at index j did not match any + // field, k will hold the map key. + var k interface{} + + t := d.nextCBORType() + if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) { + var keyBytes []byte + if t == cborTypeTextString { + keyBytes, lastErr = d.parseTextString() + if lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() // skip value + continue + } + } else { // cborTypeByteString + keyBytes, _ = d.parseByteString() + } + + // Check for exact match on field name. + if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { + fld := structType.fields[i] + + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{fld.name, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + } + + // Find field with case-insensitive match + if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive { + keyLen := len(keyBytes) + keyString := string(keyBytes) + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{keyString, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = string(keyBytes) + } + } else if t <= cborTypeNegativeInt { // uint/int + var nameAsInt int64 + + if t == cborTypePositiveInt { + _, _, val := d.getHead() + nameAsInt = int64(val) + } else { + _, _, val := d.getHead() + if val > math.MaxInt64 { + if err == nil { + err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: "-1-" + strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + d.skip() // skip value + continue + } + nameAsInt = int64(-1) ^ int64(val) + } + + // Find field + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if fld.keyAsInt && fld.nameAsInt == nameAsInt { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{nameAsInt, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = nameAsInt + } + } else { + if err == nil { + 
err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf("").String(), + errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name", + } + } + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + // parse key + k, lastErr = d.parse(true) + if lastErr != nil { + d.skip() // skip value + continue + } + // Detect if CBOR map key can be used as Go map key. + if !isHashableValue(reflect.ValueOf(k)) { + d.skip() // skip value + continue + } + } else { + d.skip() // skip key + } + } + + if f == nil { + if errOnUnknownField { + err = &UnknownFieldError{j} + d.skip() // Skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + + // Two map keys that match the same struct field are immediately considered + // duplicates. This check detects duplicates between two map keys that do + // not match a struct field. If unknown field errors are enabled, then this + // check is never reached. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + if mapKeys == nil { + mapKeys = make(map[interface{}]struct{}, 1) + } + mapKeys[k] = struct{}{} + newKeyCount := len(mapKeys) + if newKeyCount == keyCount { + err = &DupMapKeyError{k, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + keyCount = newKeyCount + } + + d.skip() // Skip value + continue + } + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.nonPtrType.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// validRegisteredTagNums verifies that tag numbers match registered tag numbers of type t. +// validRegisteredTagNums assumes next CBOR data type is tag. It scans all tag numbers, and stops at tag content. +func (d *decoder) validRegisteredTagNums(registeredTag *tagItem) error { + // Scan until next cbor data is tag content. + tagNums := make([]uint64, 0, 1) + for d.nextCBORType() == cborTypeTag { + _, _, val := d.getHead() + tagNums = append(tagNums, val) + } + + if !registeredTag.equalTagNum(tagNums) { + return &WrongTagError{registeredTag.contentType, registeredTag.num, tagNums} + } + return nil +} + +func (d *decoder) getRegisteredTagItem(vt reflect.Type) *tagItem { + if d.dm.tags != nil { + return d.dm.tags.getTagItemFromType(vt) + } + return nil +} + +// skip moves data offset to the next item. skip assumes data is well-formed, +// and does not perform bounds checking. 
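+// Indefinite-length strings, arrays, and maps are skipped item by item until
+// their break code is consumed.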
+func (d *decoder) skip() { + t, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + + if indefiniteLength { + switch t { + case cborTypeByteString, cborTypeTextString, cborTypeArray, cborTypeMap: + for { + if isBreakFlag(d.data[d.off]) { + d.off++ + return + } + d.skip() + } + } + } + + switch t { + case cborTypeByteString, cborTypeTextString: + d.off += int(val) + + case cborTypeArray: + for i := 0; i < int(val); i++ { + d.skip() + } + + case cborTypeMap: + for i := 0; i < int(val)*2; i++ { + d.skip() + } + + case cborTypeTag: + d.skip() + } +} + +func (d *decoder) getHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, +) { + t, ai, val = d.getHead() + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +// getHead assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) getHead() (t cborType, ai byte, val uint64) { + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + + if ai <= maxAdditionalInformationWithoutArgument { + return + } + + if ai == additionalInformationWith1ByteArgument { + val = uint64(d.data[d.off]) + d.off++ + return + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + return + } + return +} + +func (d *decoder) numOfItemsUntilBreak() int { + savedOff := d.off + i := 0 + for !d.foundBreak() { + d.skip() + i++ + } + d.off = savedOff + return i +} + +// foundBreak returns true if next byte is CBOR break code and moves cursor by 1, +// otherwise it returns false. +// foundBreak assumes data is well-formed, and does not perform bounds checking. 
+func (d *decoder) foundBreak() bool { + if isBreakFlag(d.data[d.off]) { + d.off++ + return true + } + return false +} + +func (d *decoder) reset(data []byte) { + d.data = data + d.off = 0 + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:0] +} + +func (d *decoder) nextCBORType() cborType { + return getType(d.data[d.off]) +} + +func (d *decoder) nextCBORNil() bool { + return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7 +} + +var ( + typeIntf = reflect.TypeOf([]interface{}(nil)).Elem() + typeTime = reflect.TypeOf(time.Time{}) + typeBigInt = reflect.TypeOf(big.Int{}) + typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + typeString = reflect.TypeOf("") + typeByteSlice = reflect.TypeOf([]byte(nil)) +) + +func fillNil(_ cborType, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr: + v.Set(reflect.Zero(v.Type())) + return nil + } + return nil +} + +func fillPositiveInt(t cborType, val uint64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if val > math.MaxInt64 { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + if v.OverflowInt(int64(val)) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(int64(val)) + return nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if v.OverflowUint(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetUint(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + + if v.Type() == typeBigInt { + i := new(big.Int).SetUint64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillNegativeInt(t cborType, val int64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if v.OverflowInt(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatInt(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + if v.Type() == typeBigInt { + i := new(big.Int).SetInt64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillBool(t cborType, val bool, v reflect.Value) error { + if v.Kind() == reflect.Bool { + v.SetBool(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillFloat(t cborType, val float64, v reflect.Value) error { + switch v.Kind() { + case reflect.Float32, reflect.Float64: + if v.OverflowFloat(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatFloat(val, 'E', -1, 64) + " overflows " + v.Type().String(), + } + } + v.SetFloat(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: 
v.Type().String()} +} + +func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error { + if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) { + if v.CanAddr() { + v = v.Addr() + if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok { + // The contract of BinaryUnmarshaler forbids + // retaining the input bytes, so no copying is + // required even if val is shared. + return u.UnmarshalBinary(val) + } + } + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + if bsts != ByteStringToStringForbidden && v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 { + src := val + if shared { + // SetBytes shares the underlying bytes of the source slice. + src = make([]byte, len(val)) + copy(src, val) + } + v.SetBytes(src) + return nil + } + if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 { + vLen := v.Len() + i := 0 + for ; i < vLen && i < len(val); i++ { + v.Index(i).SetUint(uint64(val[i])) + } + // Set remaining Go array elements to zero values. + if i < vLen { + zeroV := reflect.Zero(reflect.TypeOf(byte(0))) + for ; i < vLen; i++ { + v.Index(i).Set(zeroV) + } + } + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillTextString(t cborType, val []byte, v reflect.Value) error { + if v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func isImmutableKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, + reflect.String: + return true + + default: + return false + } +} + +func isHashableValue(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Slice, reflect.Map, reflect.Func: + return false + + case reflect.Struct: + switch rv.Type() { + case typeTag: + tag := rv.Interface().(Tag) + return isHashableValue(reflect.ValueOf(tag.Content)) + case typeBigInt: + return false + } + } + return true +} + +// convertByteSliceToByteString converts []byte to ByteString if +// - v is []byte type, or +// - v is Tag type and tag content type is []byte +// This function also handles nested tags. +// CBOR data is already verified to be well-formed before this function is used, +// so the recursion won't exceed max nested levels. +func convertByteSliceToByteString(v interface{}) (interface{}, bool) { + switch v := v.(type) { + case []byte: + return ByteString(v), true + + case Tag: + content, converted := convertByteSliceToByteString(v.Content) + if converted { + return Tag{Number: v.Number, Content: content}, true + } + } + return v, false +} diff --git a/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/vendor/github.com/fxamacker/cbor/v2/diagnose.go new file mode 100644 index 00000000..44afb866 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/diagnose.go @@ -0,0 +1,724 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+
+package cbor
+
+import (
+	"bytes"
+	"encoding/base32"
+	"encoding/base64"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"math/big"
+	"strconv"
+	"unicode/utf16"
+	"unicode/utf8"
+
+	"github.com/x448/float16"
+)
+
+// DiagMode is the main interface for CBOR diagnostic notation.
+type DiagMode interface {
+	// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode.
+	Diagnose([]byte) (string, error)
+
+	// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
+	DiagnoseFirst([]byte) (string, []byte, error)
+
+	// DiagOptions returns user-specified options used to create this DiagMode.
+	DiagOptions() DiagOptions
+}
+
+// ByteStringEncoding specifies the base encoding in which byte strings are notated.
+type ByteStringEncoding uint8
+
+const (
+	// ByteStringBase16Encoding encodes byte strings in base16, without padding.
+	ByteStringBase16Encoding ByteStringEncoding = iota
+
+	// ByteStringBase32Encoding encodes byte strings in base32, without padding.
+	ByteStringBase32Encoding
+
+	// ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding.
+	ByteStringBase32HexEncoding
+
+	// ByteStringBase64Encoding encodes byte strings in base64url, without padding.
+	ByteStringBase64Encoding
+
+	maxByteStringEncoding
+)
+
+func (bse ByteStringEncoding) valid() error {
+	if bse >= maxByteStringEncoding {
+		return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse)))
+	}
+	return nil
+}
+
+// DiagOptions specifies Diag options.
+type DiagOptions struct {
+	// ByteStringEncoding specifies the base encoding in which byte strings are notated.
+	// Default is ByteStringBase16Encoding.
+	ByteStringEncoding ByteStringEncoding
+
+	// ByteStringHexWhitespace specifies notating byte strings with whitespace between bytes
+	// when ByteStringEncoding is ByteStringBase16Encoding.
+	ByteStringHexWhitespace bool
+
+	// ByteStringText specifies notating a byte string as text
+	// if it is valid UTF-8 text.
+	ByteStringText bool
+
+	// ByteStringEmbeddedCBOR specifies notating embedded CBOR in a byte string
+	// if the bytes are valid CBOR.
+	ByteStringEmbeddedCBOR bool
+
+	// CBORSequence specifies notating CBOR sequences;
+	// otherwise, an error is returned if there are more bytes after the first CBOR data item.
+	CBORSequence bool
+
+	// FloatPrecisionIndicator specifies appending a suffix to indicate float precision.
+	// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators.
+	FloatPrecisionIndicator bool
+
+	// MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR arrays, maps, and tags.
+	// Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
+	// require larger amounts of stack to deserialize. Don't increase this higher than you require.
+	MaxNestedLevels int
+
+	// MaxArrayElements specifies the max number of elements for CBOR arrays.
+	// Default is 128*1024=131072 and it can be set to [16, 2147483647].
+	MaxArrayElements int
+
+	// MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
+	// Default is 128*1024=131072 and it can be set to [16, 2147483647].
+	MaxMapPairs int
+}
+
+// DiagMode returns a DiagMode with immutable options.
+func (opts DiagOptions) DiagMode() (DiagMode, error) {
+	return opts.diagMode()
+}
+
+func (opts DiagOptions) diagMode() (*diagMode, error) {
+	if err := opts.ByteStringEncoding.valid(); err != nil {
+		return nil, err
+	}
+
+	decMode, err := DecOptions{
+		MaxNestedLevels:  opts.MaxNestedLevels,
+		MaxArrayElements: opts.MaxArrayElements,
+		MaxMapPairs:      opts.MaxMapPairs,
+	}.decMode()
+	if err != nil {
+		return nil, err
+	}
+
+	return &diagMode{
+		byteStringEncoding:      opts.ByteStringEncoding,
+		byteStringHexWhitespace: opts.ByteStringHexWhitespace,
+		byteStringText:          opts.ByteStringText,
+		byteStringEmbeddedCBOR:  opts.ByteStringEmbeddedCBOR,
+		cborSequence:            opts.CBORSequence,
+		floatPrecisionIndicator: opts.FloatPrecisionIndicator,
+		decMode:                 decMode,
+	}, nil
+}
+
+type diagMode struct {
+	byteStringEncoding      ByteStringEncoding
+	byteStringHexWhitespace bool
+	byteStringText          bool
+	byteStringEmbeddedCBOR  bool
+	cborSequence            bool
+	floatPrecisionIndicator bool
+	decMode                 *decMode
+}
+
+// DiagOptions returns user-specified options used to create this DiagMode.
+func (dm *diagMode) DiagOptions() DiagOptions {
+	return DiagOptions{
+		ByteStringEncoding:      dm.byteStringEncoding,
+		ByteStringHexWhitespace: dm.byteStringHexWhitespace,
+		ByteStringText:          dm.byteStringText,
+		ByteStringEmbeddedCBOR:  dm.byteStringEmbeddedCBOR,
+		CBORSequence:            dm.cborSequence,
+		FloatPrecisionIndicator: dm.floatPrecisionIndicator,
+		MaxNestedLevels:         dm.decMode.maxNestedLevels,
+		MaxArrayElements:        dm.decMode.maxArrayElements,
+		MaxMapPairs:             dm.decMode.maxMapPairs,
+	}
+}
+
+// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode.
+func (dm *diagMode) Diagnose(data []byte) (string, error) {
+	return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence)
+}
+
+// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
+func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
+	return newDiagnose(data, dm.decMode, dm).diagFirst()
+}
+
+var defaultDiagMode, _ = DiagOptions{}.diagMode()
+
+// Diagnose returns extended diagnostic notation (EDN) of CBOR data items
+// using the default diagnostic mode.
+//
+// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation.
+func Diagnose(data []byte) (string, error) {
+	return defaultDiagMode.Diagnose(data)
+}
+
+// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the default diagnostic mode. Any remaining bytes are returned in rest.
+func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return defaultDiagMode.DiagnoseFirst(data) +} + +type diagnose struct { + dm *diagMode + d *decoder + w *bytes.Buffer +} + +func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose { + return &diagnose{ + dm: diagm, + d: &decoder{data: data, dm: decm}, + w: &bytes.Buffer{}, + } +} + +func (di *diagnose) diag(cborSequence bool) (string, error) { + // CBOR Sequence + firstItem := true + for { + switch err := di.wellformed(cborSequence); err { + case nil: + if !firstItem { + di.w.WriteString(", ") + } + firstItem = false + if itemErr := di.item(); itemErr != nil { + return di.w.String(), itemErr + } + + case io.EOF: + if firstItem { + return di.w.String(), err + } + return di.w.String(), nil + + default: + return di.w.String(), err + } + } +} + +func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) { + err = di.wellformed(true) + if err == nil { + err = di.item() + } + + if err == nil { + // Return EDN and the rest of the data slice (which might be len 0) + return di.w.String(), di.d.data[di.d.off:], nil + } + + return di.w.String(), nil, err +} + +func (di *diagnose) wellformed(allowExtraData bool) error { + off := di.d.off + err := di.d.wellformed(allowExtraData, false) + di.d.off = off + return err +} + +func (di *diagnose) item() error { //nolint:gocyclo + initialByte := di.d.data[di.d.off] + switch initialByte { + case cborByteStringWithIndefiniteLengthHead, + cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string + di.d.off++ + if isBreakFlag(di.d.data[di.d.off]) { + di.d.off++ + switch initialByte { + case cborByteStringWithIndefiniteLengthHead: + // indefinite-length bytes with no chunks. + di.w.WriteString(`''_`) + return nil + case cborTextStringWithIndefiniteLengthHead: + // indefinite-length text with no chunks. + di.w.WriteString(`""_`) + return nil + } + } + + di.w.WriteString("(_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // wellformedIndefiniteString() already checked that the next item is a byte/text string. + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(')') + return nil + + case cborArrayWithIndefiniteLengthHead: // indefinite-length array + di.d.off++ + di.w.WriteString("[_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(']') + return nil + + case cborMapWithIndefiniteLengthHead: // indefinite-length map + di.d.off++ + di.w.WriteString("{_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // key + if err := di.item(); err != nil { + return err + } + + di.w.WriteString(": ") + + // value + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte('}') + return nil + } + + t := di.d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := di.d.getHead() + di.w.WriteString(strconv.FormatUint(val, 10)) + return nil + + case cborTypeNegativeInt: + _, _, val := di.d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
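+			// (The head argument val encodes the value -(val+1), hence the
+			// add-then-negate below.)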
+			bi := new(big.Int)
+			bi.SetUint64(val)
+			bi.Add(bi, big.NewInt(1))
+			bi.Neg(bi)
+			di.w.WriteString(bi.String())
+			return nil
+		}
+
+		nValue := int64(-1) ^ int64(val)
+		di.w.WriteString(strconv.FormatInt(nValue, 10))
+		return nil
+
+	case cborTypeByteString:
+		b, _ := di.d.parseByteString()
+		return di.encodeByteString(b)
+
+	case cborTypeTextString:
+		b, err := di.d.parseTextString()
+		if err != nil {
+			return err
+		}
+		return di.encodeTextString(string(b), '"')
+
+	case cborTypeArray:
+		_, _, val := di.d.getHead()
+		count := int(val)
+		di.w.WriteByte('[')
+
+		for i := 0; i < count; i++ {
+			if i > 0 {
+				di.w.WriteString(", ")
+			}
+			if err := di.item(); err != nil {
+				return err
+			}
+		}
+		di.w.WriteByte(']')
+		return nil
+
+	case cborTypeMap:
+		_, _, val := di.d.getHead()
+		count := int(val)
+		di.w.WriteByte('{')
+
+		for i := 0; i < count; i++ {
+			if i > 0 {
+				di.w.WriteString(", ")
+			}
+			// key
+			if err := di.item(); err != nil {
+				return err
+			}
+			di.w.WriteString(": ")
+			// value
+			if err := di.item(); err != nil {
+				return err
+			}
+		}
+		di.w.WriteByte('}')
+		return nil
+
+	case cborTypeTag:
+		_, _, tagNum := di.d.getHead()
+		switch tagNum {
+		case tagNumUnsignedBignum:
+			if nt := di.d.nextCBORType(); nt != cborTypeByteString {
+				return newInadmissibleTagContentTypeError(
+					tagNumUnsignedBignum,
+					"byte string",
+					nt.String())
+			}
+
+			b, _ := di.d.parseByteString()
+			bi := new(big.Int).SetBytes(b)
+			di.w.WriteString(bi.String())
+			return nil
+
+		case tagNumNegativeBignum:
+			if nt := di.d.nextCBORType(); nt != cborTypeByteString {
+				return newInadmissibleTagContentTypeError(
+					tagNumNegativeBignum,
+					"byte string",
+					nt.String(),
+				)
+			}
+
+			b, _ := di.d.parseByteString()
+			bi := new(big.Int).SetBytes(b)
+			bi.Add(bi, big.NewInt(1))
+			bi.Neg(bi)
+			di.w.WriteString(bi.String())
+			return nil
+
+		default:
+			di.w.WriteString(strconv.FormatUint(tagNum, 10))
+			di.w.WriteByte('(')
+			if err := di.item(); err != nil {
+				return err
+			}
+			di.w.WriteByte(')')
+			return nil
+		}
+
+	case cborTypePrimitives:
+		_, ai, val := di.d.getHead()
+		switch ai {
+		case additionalInformationAsFalse:
+			di.w.WriteString("false")
+			return nil
+
+		case additionalInformationAsTrue:
+			di.w.WriteString("true")
+			return nil
+
+		case additionalInformationAsNull:
+			di.w.WriteString("null")
+			return nil
+
+		case additionalInformationAsUndefined:
+			di.w.WriteString("undefined")
+			return nil
+
+		case additionalInformationAsFloat16,
+			additionalInformationAsFloat32,
+			additionalInformationAsFloat64:
+			return di.encodeFloat(ai, val)
+
+		default:
+			di.w.WriteString("simple(")
+			di.w.WriteString(strconv.FormatUint(val, 10))
+			di.w.WriteByte(')')
+			return nil
+		}
+	}
+
+	return nil
+}
+
+// writeU16 formats a rune as "\uxxxx".
+func (di *diagnose) writeU16(val rune) {
+	di.w.WriteString("\\u")
+	var in [2]byte
+	in[0] = byte(val >> 8)
+	in[1] = byte(val)
+	sz := hex.EncodedLen(len(in))
+	di.w.Grow(sz)
+	dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
+	hex.Encode(dst, in[:])
+	di.w.Write(dst)
+}
+
+var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding)
+var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
+
+func (di *diagnose) encodeByteString(val []byte) error {
+	if len(val) > 0 {
+		if di.dm.byteStringText && utf8.Valid(val) {
+			return di.encodeTextString(string(val), '\'')
+		}
+
+		if di.dm.byteStringEmbeddedCBOR {
+			di2 := newDiagnose(val, di.dm.decMode, di.dm)
+			// Embedded CBOR data is always notated as a CBOR sequence.
+ if str, err := di2.diag(true); err == nil { + di.w.WriteString("<<") + di.w.WriteString(str) + di.w.WriteString(">>") + return nil + } + } + } + + switch di.dm.byteStringEncoding { + case ByteStringBase16Encoding: + di.w.WriteString("h'") + if di.dm.byteStringHexWhitespace { + sz := hex.EncodedLen(len(val)) + if len(val) > 0 { + sz += len(val) - 1 + } + di.w.Grow(sz) + + dst := di.w.Bytes()[di.w.Len():] + for i := range val { + if i > 0 { + dst = append(dst, ' ') + } + hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1]) + dst = dst[:len(dst)+2] + } + di.w.Write(dst) + } else { + sz := hex.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, val) + di.w.Write(dst) + } + di.w.WriteByte('\'') + return nil + + case ByteStringBase32Encoding: + di.w.WriteString("b32'") + sz := rawBase32Encoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32Encoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase32HexEncoding: + di.w.WriteString("h32'") + sz := rawBase32HexEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32HexEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase64Encoding: + di.w.WriteString("b64'") + sz := base64.RawURLEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + base64.RawURLEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + default: + // It should not be possible for users to construct a *diagMode with an invalid byte + // string encoding. + panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding)) + } +} + +const utf16SurrSelf = rune(0x10000) + +// quote should be either `'` or `"` +func (di *diagnose) encodeTextString(val string, quote byte) error { + di.w.WriteByte(quote) + + for i := 0; i < len(val); { + if b := val[i]; b < utf8.RuneSelf { + switch { + case b == '\t', b == '\n', b == '\r', b == '\\', b == quote: + di.w.WriteByte('\\') + + switch b { + case '\t': + b = 't' + case '\n': + b = 'n' + case '\r': + b = 'r' + } + di.w.WriteByte(b) + + case b >= ' ' && b <= '~': + di.w.WriteByte(b) + + default: + di.writeU16(rune(b)) + } + + i++ + continue + } + + c, size := utf8.DecodeRuneInString(val[i:]) + switch { + case c == utf8.RuneError: + return &SemanticError{"cbor: invalid UTF-8 string"} + + case c < utf16SurrSelf: + di.writeU16(c) + + default: + c1, c2 := utf16.EncodeRune(c) + di.writeU16(c1) + di.writeU16(c2) + } + + i += size + } + + di.w.WriteByte(quote) + return nil +} + +func (di *diagnose) encodeFloat(ai byte, val uint64) error { + f64 := float64(0) + switch ai { + case additionalInformationAsFloat16: + f16 := float16.Frombits(uint16(val)) + switch { + case f16.IsNaN(): + di.w.WriteString("NaN") + return nil + case f16.IsInf(1): + di.w.WriteString("Infinity") + return nil + case f16.IsInf(-1): + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f16.Float32()) + } + + case additionalInformationAsFloat32: + f32 := math.Float32frombits(uint32(val)) + switch { + case f32 != f32: + di.w.WriteString("NaN") + return nil + case f32 > math.MaxFloat32: + di.w.WriteString("Infinity") + return nil + case f32 < -math.MaxFloat32: + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f32) + } + + case additionalInformationAsFloat64: + f64 = math.Float64frombits(val) + switch { + case f64 
!= f64:
+			di.w.WriteString("NaN")
+			return nil
+		case f64 > math.MaxFloat64:
+			di.w.WriteString("Infinity")
+			return nil
+		case f64 < -math.MaxFloat64:
+			di.w.WriteString("-Infinity")
+			return nil
+		}
+	}
+	// Use ES6 number to string conversion which should match most JSON generators.
+	// Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585
+	const bitSize = 64
+	b := make([]byte, 0, 32)
+	if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
+		b = strconv.AppendFloat(b, f64, 'e', -1, bitSize)
+		// clean up e-09 to e-9
+		n := len(b)
+		if n >= 4 && string(b[n-4:n-1]) == "e-0" {
+			b = append(b[:n-2], b[n-1])
+		}
+	} else {
+		b = strconv.AppendFloat(b, f64, 'f', -1, bitSize)
+	}
+
+	// add decimal point and trailing zero if needed
+	if bytes.IndexByte(b, '.') < 0 {
+		if i := bytes.IndexByte(b, 'e'); i < 0 {
+			b = append(b, '.', '0')
+		} else {
+			b = append(b[:i+2], b[i:]...)
+			b[i] = '.'
+			b[i+1] = '0'
+		}
+	}
+
+	di.w.WriteString(string(b))
+
+	if di.dm.floatPrecisionIndicator {
+		switch ai {
+		case additionalInformationAsFloat16:
+			di.w.WriteString("_1")
+			return nil
+
+		case additionalInformationAsFloat32:
+			di.w.WriteString("_2")
+			return nil
+
+		case additionalInformationAsFloat64:
+			di.w.WriteString("_3")
+			return nil
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/doc.go b/vendor/github.com/fxamacker/cbor/v2/doc.go
new file mode 100644
index 00000000..23f68b98
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/doc.go
@@ -0,0 +1,129 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+/*
+Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags,
+Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding,
+CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.
+
+Encoding options allow "preferred serialization" by encoding integers and floats
+to their smallest forms (e.g. float16) when values fit.
+
+Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller
+and easier to use with structs.
+
+For example, the "toarray" tag makes struct fields encode to CBOR array elements. And
+"keyasint" makes a field encode to an element of a CBOR map with the specified int key.
+
+Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go
+
+# Basics
+
+The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start
+
+Function signatures identical to encoding/json include:
+
+	Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode.
+
+Standard interfaces include:
+
+	BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler.
+
+Custom encoding and decoding is possible by implementing standard interfaces for
+user-defined Go types.
+
+Codec functions are available at package-level (using default options) or by
+creating modes from options at runtime.
+
+"Mode" in this API means a definite way of encoding (EncMode) or decoding (DecMode).
+
+EncMode and DecMode interfaces are created from EncOptions or DecOptions structs.
+
+	em, err := cbor.EncOptions{...}.EncMode()
+	em, err := cbor.CanonicalEncOptions().EncMode()
+	em, err := cbor.CTAP2EncOptions().EncMode()
+
+Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of
+modes won't accidentally change at runtime after they're created.
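+
+For decoding, a DecMode is created from DecOptions in the same way. A minimal
+sketch (the option shown is just an example, not a required setting):
+
+	opts := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}
+	dm, err := opts.DecMode()
+	if err != nil {
+		// handle error
+	}
+	err = dm.Unmarshal(data, &v)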
+ +Modes are intended to be reused and are safe for concurrent use. + +EncMode and DecMode Interfaces + + // EncMode interface uses immutable options and is safe for concurrent use. + type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions // returns copy of options + } + + // DecMode interface uses immutable options and is safe for concurrent use. + type DecMode interface { + Unmarshal(data []byte, v interface{}) error + NewDecoder(r io.Reader) *Decoder + DecOptions() DecOptions // returns copy of options + } + +Using Default Encoding Mode + + b, err := cbor.Marshal(v) + + encoder := cbor.NewEncoder(w) + err = encoder.Encode(v) + +Using Default Decoding Mode + + err := cbor.Unmarshal(b, &v) + + decoder := cbor.NewDecoder(r) + err = decoder.Decode(&v) + +Creating and Using Encoding Modes + + // Create EncOptions using either struct literal or a function. + opts := cbor.CanonicalEncOptions() + + // If needed, modify encoding options + opts.Time = cbor.TimeUnix + + // Create reusable EncMode interface with immutable options, safe for concurrent use. + em, err := opts.EncMode() + + // Use EncMode like encoding/json, with same function signatures. + b, err := em.Marshal(v) + // or + encoder := em.NewEncoder(w) + err := encoder.Encode(v) + + // NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options + // specified during creation of em (encoding mode). + +# CBOR Options + +Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options + +Encoding Options: https://github.com/fxamacker/cbor#encoding-options + +Decoding Options: https://github.com/fxamacker/cbor#decoding-options + +# Struct Tags + +Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected. +If both struct tags are specified then `cbor` is used. + +Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use +very compact formats like COSE and CWT (CBOR Web Tokens) with structs. + +For example, "toarray" makes struct fields encode to array elements. And "keyasint" +makes struct fields encode to elements of CBOR map with int keys. + +https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png + +Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1 + +# Tests and Fuzzing + +Over 375 tests are included in this package. Cover-guided fuzzing is handled by +a private fuzzer that replaced fxamacker/cbor-fuzz years ago. +*/ +package cbor diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go new file mode 100644 index 00000000..6508e291 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode.go @@ -0,0 +1,1989 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "github.com/x448/float16" +) + +// Marshal returns the CBOR encoding of v using default encoding options. +// See EncOptions for encoding options. +// +// Marshal uses the following encoding rules: +// +// If value implements the Marshaler interface, Marshal calls its +// MarshalCBOR method. 
+//
+// If value implements encoding.BinaryMarshaler, Marshal calls its
+// MarshalBinary method and encodes it as a CBOR byte string.
+//
+// Boolean values encode as CBOR booleans (type 7).
+//
+// Positive integer values encode as CBOR positive integers (type 0).
+//
+// Negative integer values encode as CBOR negative integers (type 1).
+//
+// Floating point values encode as CBOR floating points (type 7).
+//
+// String values encode as CBOR text strings (type 3).
+//
+// []byte values encode as CBOR byte strings (type 2).
+//
+// Array and slice values encode as CBOR arrays (type 4).
+//
+// Map values encode as CBOR maps (type 5).
+//
+// Struct values encode as CBOR maps (type 5). Each exported struct field
+// becomes a pair with field name encoded as CBOR text string (type 3) and
+// field value encoded based on its type. See struct tag option "keyasint"
+// to encode field name as CBOR integer (type 0 and 1). Also see struct
+// tag option "toarray" for special field "_" to encode struct values as
+// CBOR array (type 4).
+//
+// Marshal supports a format string stored under the "cbor" key in the struct
+// field's tag. The format string can specify the name of the field,
+// "omitempty" and "keyasint" options, and special case "-" for field omission.
+// If the "cbor" key is absent, Marshal uses the "json" key.
+//
+// Struct field name is treated as integer if it has "keyasint" option in
+// its format string. The format string must specify an integer as its
+// field name.
+//
+// Special struct field "_" is used to specify struct level options, such as
+// "toarray". "toarray" option enables Go struct to be encoded as CBOR array.
+// "omitempty" is disabled by "toarray" to ensure that the same number
+// of elements are encoded every time.
+//
+// Anonymous struct fields are marshaled as if their exported fields
+// were fields in the outer struct. Marshal follows the same struct fields
+// visibility rules used by JSON encoding package.
+//
+// time.Time values encode as text strings specified in RFC3339 or numerical
+// representation of seconds since January 1, 1970 UTC depending on
+// EncOptions.Time setting. Also see EncOptions.TimeTag to encode
+// time.Time as CBOR tag with tag number 0 or 1.
+//
+// big.Int values encode as CBOR integers (type 0 and 1) if values fit.
+// Otherwise, big.Int values encode as CBOR bignums (tag 2 and 3). See
+// EncOptions.BigIntConvert to always encode big.Int values as CBOR
+// bignums.
+//
+// Pointer values encode as the value pointed to.
+//
+// Interface values encode as the value stored in the interface.
+//
+// Nil slice/map/pointer/interface values encode as CBOR nulls (type 7).
+//
+// Values of other types cannot be encoded in CBOR. Attempting
+// to encode such a value causes Marshal to return an UnsupportedTypeError.
+func Marshal(v interface{}) ([]byte, error) {
+	return defaultEncMode.Marshal(v)
+}
+
+// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool)
+// and uses default encoding options.
+//
+// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain
+// partially encoded data if an error is returned.
+//
+// See Marshal for more details.
+func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error {
+	return defaultEncMode.MarshalToBuffer(v, buf)
+}
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid CBOR.
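+//
+// For example, a type can emit a hand-rolled encoding by delegating to Marshal
+// (an illustrative sketch only; unixSec is a hypothetical type, not part of this
+// package):
+//
+//	type unixSec struct{ sec int64 }
+//
+//	func (t unixSec) MarshalCBOR() ([]byte, error) {
+//		return Marshal(t.sec) // encodes as a CBOR integer (major type 0 or 1)
+//	}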
+type Marshaler interface {
+	MarshalCBOR() ([]byte, error)
+}
+
+// MarshalerError represents error from checking encoded CBOR data item
+// returned from MarshalCBOR for well-formedness and some very limited tag validation.
+type MarshalerError struct {
+	typ reflect.Type
+	err error
+}
+
+func (e *MarshalerError) Error() string {
+	return "cbor: error calling MarshalCBOR for type " +
+		e.typ.String() +
+		": " + e.err.Error()
+}
+
+func (e *MarshalerError) Unwrap() error {
+	return e.err
+}
+
+// UnsupportedTypeError is returned by Marshal when attempting to encode value
+// of an unsupported type.
+type UnsupportedTypeError struct {
+	Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+	return "cbor: unsupported type: " + e.Type.String()
+}
+
+// UnsupportedValueError is returned by Marshal when attempting to encode an
+// unsupported value.
+type UnsupportedValueError struct {
+	msg string
+}
+
+func (e *UnsupportedValueError) Error() string {
+	return "cbor: unsupported value: " + e.msg
+}
+
+// SortMode identifies supported sorting order.
+type SortMode int
+
+const (
+	// SortNone encodes map pairs and struct fields in an arbitrary order.
+	SortNone SortMode = 0
+
+	// SortLengthFirst causes map keys or struct fields to be sorted such that:
+	//     - If two keys have different lengths, the shorter one sorts earlier;
+	//     - If two keys have the same length, the one with the lower value in
+	//       (byte-wise) lexical order sorts earlier.
+	// It is used in "Canonical CBOR" encoding in RFC 7049 3.9.
+	SortLengthFirst SortMode = 1
+
+	// SortBytewiseLexical causes map keys or struct fields to be sorted in the
+	// bytewise lexicographic order of their deterministic CBOR encodings.
+	// It is used in "CTAP2 Canonical CBOR" and "Core Deterministic Encoding"
+	// in RFC 7049bis.
+	SortBytewiseLexical SortMode = 2
+
+	// SortFastShuffle encodes map pairs and struct fields in a shuffled
+	// order. This mode does not guarantee an unbiased permutation, but it
+	// does guarantee that the runtime of the shuffle algorithm used will be
+	// constant.
+	SortFastShuffle SortMode = 3
+
+	// SortCanonical is used in "Canonical CBOR" encoding in RFC 7049 3.9.
+	SortCanonical SortMode = SortLengthFirst
+
+	// SortCTAP2 is used in "CTAP2 Canonical CBOR".
+	SortCTAP2 SortMode = SortBytewiseLexical
+
+	// SortCoreDeterministic is used in "Core Deterministic Encoding" in RFC 7049bis.
+	SortCoreDeterministic SortMode = SortBytewiseLexical
+
+	maxSortMode SortMode = 4
+)
+
+func (sm SortMode) valid() bool {
+	return sm >= 0 && sm < maxSortMode
+}
+
+// StringMode specifies how to encode Go string values.
+type StringMode int
+
+const (
+	// StringToTextString encodes Go string to CBOR text string (major type 3).
+	StringToTextString StringMode = iota
+
+	// StringToByteString encodes Go string to CBOR byte string (major type 2).
+	StringToByteString
+)
+
+func (st StringMode) cborType() (cborType, error) {
+	switch st {
+	case StringToTextString:
+		return cborTypeTextString, nil
+
+	case StringToByteString:
+		return cborTypeByteString, nil
+	}
+	return 0, errors.New("cbor: invalid StringType " + strconv.Itoa(int(st)))
+}
+
+// ShortestFloatMode specifies which floating-point format should
+// be used as the shortest possible format for CBOR encoding.
+// It is not used for encoding Infinity and NaN values.
+type ShortestFloatMode int
+
+const (
+	// ShortestFloatNone makes float values encode without any conversion.
+	// This is the default for ShortestFloatMode in v1.
+	// E.g. a float32 in Go will encode to CBOR float32. And
+	// a float64 in Go will encode to CBOR float64.
+	ShortestFloatNone ShortestFloatMode = iota
+
+	// ShortestFloat16 specifies float16 as the shortest form that preserves value.
+	// E.g. if float64 can convert to float32 while preserving value, then
+	// encoding will also try to convert float32 to float16. So a float64 might
+	// encode as CBOR float64, float32 or float16 depending on the value.
+	ShortestFloat16
+
+	maxShortestFloat
+)
+
+func (sfm ShortestFloatMode) valid() bool {
+	return sfm >= 0 && sfm < maxShortestFloat
+}
+
+// NaNConvertMode specifies how to encode NaN and overrides ShortestFloatMode.
+// ShortestFloatMode is not used for encoding Infinity and NaN values.
+type NaNConvertMode int
+
+const (
+	// NaNConvert7e00 always encodes NaN to 0xf97e00 (CBOR float16 = 0x7e00).
+	NaNConvert7e00 NaNConvertMode = iota
+
+	// NaNConvertNone never modifies or converts NaN to other representations
+	// (float64 NaN stays float64, etc. even if it can use float16 without losing
+	// any bits).
+	NaNConvertNone
+
+	// NaNConvertPreserveSignal converts NaN to the smallest form that preserves
+	// value (quiet bit + payload) as described in RFC 7049bis Draft 12.
+	NaNConvertPreserveSignal
+
+	// NaNConvertQuiet always forces quiet bit = 1 and shortest form that preserves
+	// NaN payload.
+	NaNConvertQuiet
+
+	// NaNConvertReject returns UnsupportedValueError on attempts to encode a NaN value.
+	NaNConvertReject
+
+	maxNaNConvert
+)
+
+func (ncm NaNConvertMode) valid() bool {
+	return ncm >= 0 && ncm < maxNaNConvert
+}
+
+// InfConvertMode specifies how to encode Infinity and overrides ShortestFloatMode.
+// ShortestFloatMode is not used for encoding Infinity and NaN values.
+type InfConvertMode int
+
+const (
+	// InfConvertFloat16 always converts Inf to lossless IEEE binary16 (float16).
+	InfConvertFloat16 InfConvertMode = iota
+
+	// InfConvertNone never converts (used by CTAP2 Canonical CBOR).
+	InfConvertNone
+
+	// InfConvertReject returns UnsupportedValueError on attempts to encode an infinite value.
+	InfConvertReject
+
+	maxInfConvert
+)
+
+func (icm InfConvertMode) valid() bool {
+	return icm >= 0 && icm < maxInfConvert
+}
+
+// TimeMode specifies how to encode time.Time values.
+type TimeMode int
+
+const (
+	// TimeUnix causes time.Time to be encoded as epoch time in integer with second precision.
+	TimeUnix TimeMode = iota
+
+	// TimeUnixMicro causes time.Time to be encoded as epoch time in floating-point rounded to microsecond precision.
+	TimeUnixMicro
+
+	// TimeUnixDynamic causes time.Time to be encoded as integer if time.Time doesn't have fractional seconds,
+	// otherwise floating-point rounded to microsecond precision.
+	TimeUnixDynamic
+
+	// TimeRFC3339 causes time.Time to be encoded as RFC3339 formatted string with second precision.
+	TimeRFC3339
+
+	// TimeRFC3339Nano causes time.Time to be encoded as RFC3339 formatted string with nanosecond precision.
+	TimeRFC3339Nano
+
+	maxTimeMode
+)
+
+func (tm TimeMode) valid() bool {
+	return tm >= 0 && tm < maxTimeMode
+}
+
+// BigIntConvertMode specifies how to encode big.Int values.
+type BigIntConvertMode int
+
+const (
+	// BigIntConvertShortest makes big.Int encode to CBOR integer if value fits.
+	// E.g. if big.Int value can be converted to CBOR integer while preserving
+	// value, encoder will encode it to CBOR integer (major type 0 or 1).
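+	// For example, big.NewInt(1) fits in a CBOR integer and encodes as the
+	// single byte 0x01 under this mode, while BigIntConvertNone would encode
+	// it as the bignum 0xc2 0x41 0x01 (tag 2 wrapping a 1-byte magnitude).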
+	BigIntConvertShortest BigIntConvertMode = iota
+
+	// BigIntConvertNone makes big.Int encode to CBOR bignum (tag 2 or 3) without
+	// converting it to another CBOR type.
+	BigIntConvertNone
+
+	// BigIntConvertReject returns an UnsupportedTypeError instead of marshaling a big.Int.
+	BigIntConvertReject
+
+	maxBigIntConvert
+)
+
+func (bim BigIntConvertMode) valid() bool {
+	return bim >= 0 && bim < maxBigIntConvert
+}
+
+// NilContainersMode specifies how to encode nil slices and maps.
+type NilContainersMode int
+
+const (
+	// NilContainerAsNull encodes nil slices and maps as CBOR null.
+	// This is the default.
+	NilContainerAsNull NilContainersMode = iota
+
+	// NilContainerAsEmpty encodes nil slices and maps as
+	// empty container (CBOR bytestring, array, or map).
+	NilContainerAsEmpty
+
+	maxNilContainersMode
+)
+
+func (m NilContainersMode) valid() bool {
+	return m >= 0 && m < maxNilContainersMode
+}
+
+// OmitEmptyMode specifies how to encode struct fields with omitempty tag.
+// The default behavior omits if field value would encode as empty CBOR value.
+type OmitEmptyMode int
+
+const (
+	// OmitEmptyCBORValue specifies that struct fields tagged with "omitempty"
+	// should be omitted from encoding if the field would be encoded as an empty
+	// CBOR value, such as CBOR false, 0, 0.0, nil, empty byte, empty string,
+	// empty array, or empty map.
+	OmitEmptyCBORValue OmitEmptyMode = iota
+
+	// OmitEmptyGoValue specifies that struct fields tagged with "omitempty"
+	// should be omitted from encoding if the field has an empty Go value,
+	// defined as false, 0, 0.0, a nil pointer, a nil interface value, and
+	// any empty array, slice, map, or string.
+	// This behavior is the same as the current (aka v1) encoding/json package
+	// included in Go.
+	OmitEmptyGoValue
+
+	maxOmitEmptyMode
+)
+
+func (om OmitEmptyMode) valid() bool {
+	return om >= 0 && om < maxOmitEmptyMode
+}
+
+// FieldNameMode specifies the CBOR type to use when encoding struct field names.
+type FieldNameMode int
+
+const (
+	// FieldNameToTextString encodes struct field names to CBOR text string (major type 3).
+	FieldNameToTextString FieldNameMode = iota
+
+	// FieldNameToByteString encodes struct field names to CBOR byte string (major type 2).
+	FieldNameToByteString
+
+	maxFieldNameMode
+)
+
+func (fnm FieldNameMode) valid() bool {
+	return fnm >= 0 && fnm < maxFieldNameMode
+}
+
+// ByteSliceLaterFormatMode specifies which later format conversion hint (CBOR tag 21-23)
+// to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will
+// always encode unmodified bytes from the byte slice and just wrap it within
+// CBOR tag 21, 22, or 23 if specified.
+// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2.
+type ByteSliceLaterFormatMode int
+
+const (
+	// ByteSliceLaterFormatNone encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// without adding CBOR tag 21, 22, or 23.
+	ByteSliceLaterFormatNone ByteSliceLaterFormatMode = iota
+
+	// ByteSliceLaterFormatBase64URL encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// inside CBOR tag 21 (expected later conversion to base64url encoding, see RFC 8949 Section 3.4.5.2).
+	ByteSliceLaterFormatBase64URL
+
+	// ByteSliceLaterFormatBase64 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// inside CBOR tag 22 (expected later conversion to base64 encoding, see RFC 8949 Section 3.4.5.2).
+	ByteSliceLaterFormatBase64
+
+	// ByteSliceLaterFormatBase16 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// inside CBOR tag 23 (expected later conversion to base16 encoding, see RFC 8949 Section 3.4.5.2).
+	ByteSliceLaterFormatBase16
+)
+
+func (bsefm ByteSliceLaterFormatMode) encodingTag() (uint64, error) {
+	switch bsefm {
+	case ByteSliceLaterFormatNone:
+		return 0, nil
+
+	case ByteSliceLaterFormatBase64URL:
+		return tagNumExpectedLaterEncodingBase64URL, nil
+
+	case ByteSliceLaterFormatBase64:
+		return tagNumExpectedLaterEncodingBase64, nil
+
+	case ByteSliceLaterFormatBase16:
+		return tagNumExpectedLaterEncodingBase16, nil
+	}
+	return 0, errors.New("cbor: invalid ByteSliceLaterFormat " + strconv.Itoa(int(bsefm)))
+}
+
+// ByteArrayMode specifies how to encode byte arrays.
+type ByteArrayMode int
+
+const (
+	// ByteArrayToByteSlice encodes byte arrays the same way that a byte slice with identical
+	// length and contents is encoded.
+	ByteArrayToByteSlice ByteArrayMode = iota
+
+	// ByteArrayToArray encodes byte arrays to the CBOR array type with one unsigned integer
+	// item for each byte in the array.
+	ByteArrayToArray
+
+	maxByteArrayMode
+)
+
+func (bam ByteArrayMode) valid() bool {
+	return bam >= 0 && bam < maxByteArrayMode
+}
+
+// BinaryMarshalerMode specifies how to encode types that implement encoding.BinaryMarshaler.
+type BinaryMarshalerMode int
+
+const (
+	// BinaryMarshalerByteString encodes the output of MarshalBinary to a CBOR byte string.
+	BinaryMarshalerByteString BinaryMarshalerMode = iota
+
+	// BinaryMarshalerNone does not recognize BinaryMarshaler implementations during encode.
+	BinaryMarshalerNone
+
+	maxBinaryMarshalerMode
+)
+
+func (bmm BinaryMarshalerMode) valid() bool {
+	return bmm >= 0 && bmm < maxBinaryMarshalerMode
+}
+
+// EncOptions specifies encoding options.
+type EncOptions struct {
+	// Sort specifies sorting order.
+	Sort SortMode
+
+	// ShortestFloat specifies the shortest floating-point encoding that preserves
+	// the value being encoded.
+	ShortestFloat ShortestFloatMode
+
+	// NaNConvert specifies how to encode NaN and it overrides ShortestFloatMode.
+	NaNConvert NaNConvertMode
+
+	// InfConvert specifies how to encode Inf and it overrides ShortestFloatMode.
+	InfConvert InfConvertMode
+
+	// BigIntConvert specifies how to encode big.Int values.
+	BigIntConvert BigIntConvertMode
+
+	// Time specifies how to encode time.Time.
+	Time TimeMode
+
+	// TimeTag allows time.Time to be encoded with a tag number.
+	// RFC3339 format gets tag number 0, and numeric epoch time tag number 1.
+	TimeTag EncTagMode
+
+	// IndefLength specifies whether to allow indefinite length CBOR items.
+	IndefLength IndefLengthMode
+
+	// NilContainers specifies how to encode nil slices and maps.
+	NilContainers NilContainersMode
+
+	// TagsMd specifies whether to allow CBOR tags (major type 6).
+	TagsMd TagsMode
+
+	// OmitEmpty specifies how to encode struct fields with omitempty tag.
+	OmitEmpty OmitEmptyMode
+
+	// String specifies which CBOR type to use when encoding Go strings.
+	//   - CBOR text string (major type 3) is default
+	//   - CBOR byte string (major type 2)
+	String StringMode
+
+	// FieldName specifies the CBOR type to use when encoding struct field names.
+	FieldName FieldNameMode
+
+	// ByteSliceLaterFormat specifies which later format conversion hint (CBOR tag 21-23)
+	// to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will
+	// always encode unmodified bytes from the byte slice and just wrap it within
+	// CBOR tag 21, 22, or 23 if specified.
+	// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2.
+	ByteSliceLaterFormat ByteSliceLaterFormatMode
+
+	// ByteArray specifies how to encode byte arrays.
+	ByteArray ByteArrayMode
+
+	// BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler.
+	BinaryMarshaler BinaryMarshalerMode
+}
+
+// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding,
+// defined in RFC 7049 Section 3.9 with the following rules:
+//
+// 1. "Integers must be as small as possible."
+// 2. "The expression of lengths in major types 2 through 5 must be as short as possible."
+// 3. The keys in every map must be sorted in length-first sorting order.
+//    See SortLengthFirst for details.
+// 4. "Indefinite-length items must be made into definite-length items."
+// 5. "If a protocol allows for IEEE floats, then additional canonicalization rules might
+//    need to be added. One example rule might be to have all floats start as a 64-bit
+//    float, then do a test conversion to a 32-bit float; if the result is the same numeric
+//    value, use the shorter value and repeat the process with a test conversion to a
+//    16-bit float. (This rule selects 16-bit float for positive and negative Infinity
+//    as well.) Also, there are many representations for NaN. If NaN is an allowed value,
+//    it must always be represented as 0xf97e00."
+func CanonicalEncOptions() EncOptions {
+	return EncOptions{
+		Sort:          SortCanonical,
+		ShortestFloat: ShortestFloat16,
+		NaNConvert:    NaNConvert7e00,
+		InfConvert:    InfConvertFloat16,
+		IndefLength:   IndefLengthForbidden,
+	}
+}
+
+// CTAP2EncOptions returns EncOptions for "CTAP2 Canonical CBOR" encoding,
+// defined in CTAP specification, with the following rules:
+//
+// 1. "Integers must be encoded as small as possible."
+// 2. "The representations of any floating-point values are not changed."
+// 3. "The expression of lengths in major types 2 through 5 must be as short as possible."
+// 4. "Indefinite-length items must be made into definite-length items."
+// 5. The keys in every map must be sorted in bytewise lexicographic order.
+//    See SortBytewiseLexical for details.
+// 6. "Tags as defined in Section 2.4 in [RFC7049] MUST NOT be present."
+func CTAP2EncOptions() EncOptions {
+	return EncOptions{
+		Sort:          SortCTAP2,
+		ShortestFloat: ShortestFloatNone,
+		NaNConvert:    NaNConvertNone,
+		InfConvert:    InfConvertNone,
+		IndefLength:   IndefLengthForbidden,
+		TagsMd:        TagsForbidden,
+	}
+}
+
+// CoreDetEncOptions returns EncOptions for "Core Deterministic" encoding,
+// defined in RFC 7049bis with the following rules:
+//
+// 1. "Preferred serialization MUST be used. In particular, this means that arguments
+//    (see Section 3) for integers, lengths in major types 2 through 5, and tags MUST
+//    be as short as possible"
+//    "Floating point values also MUST use the shortest form that preserves the value"
+// 2. "Indefinite-length items MUST NOT appear."
+// 3. "The keys in every map MUST be sorted in the bytewise lexicographic order of
+//    their deterministic encodings."
+func CoreDetEncOptions() EncOptions { + return EncOptions{ + Sort: SortCoreDeterministic, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// PreferredUnsortedEncOptions returns EncOptions for "Preferred Serialization" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "The preferred serialization always uses the shortest form of representing the argument +// (Section 3);" +// 2. "it also uses the shortest floating-point encoding that preserves the value being +// encoded (see Section 5.5)." +// "The preferred encoding for a floating-point value is the shortest floating-point encoding +// that preserves its value, e.g., 0xf94580 for the number 5.5, and 0xfa45ad9c00 for the +// number 5555.5, unless the CBOR-based protocol specifically excludes the use of the shorter +// floating-point encodings. For NaN values, a shorter encoding is preferred if zero-padding +// the shorter significand towards the right reconstitutes the original NaN value (for many +// applications, the single NaN encoding 0xf97e00 will suffice)." +// 3. "Definite length encoding is preferred whenever the length is known at the time the +// serialization of the item starts." +func PreferredUnsortedEncOptions() EncOptions { + return EncOptions{ + Sort: SortNone, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + } +} + +// EncMode returns EncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) EncMode() (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// UserBufferEncMode returns UserBufferEncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) UserBufferEncMode() (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// EncModeWithTags returns EncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) EncModeWithTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithTags(tags) +} + +// UserBufferEncModeWithTags returns UserBufferEncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) UserBufferEncModeWithTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.EncTag != EncTagNone { + ts[contentType] = tag + } + } + syncTags.RUnlock() + if len(ts) > 0 { + em.tags = ts + } + return em, nil +} + +// EncModeWithSharedTags returns EncMode with immutable options and mutable shared tags (safe for concurrency). +func (opts EncOptions) EncModeWithSharedTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithSharedTags(tags) +} + +// UserBufferEncModeWithSharedTags returns UserBufferEncMode with immutable options and mutable shared tags (safe for concurrency). 
+func (opts EncOptions) UserBufferEncModeWithSharedTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + em.tags = tags + return em, nil +} + +func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.Sort.valid() { + return nil, errors.New("cbor: invalid SortMode " + strconv.Itoa(int(opts.Sort))) + } + if !opts.ShortestFloat.valid() { + return nil, errors.New("cbor: invalid ShortestFloatMode " + strconv.Itoa(int(opts.ShortestFloat))) + } + if !opts.NaNConvert.valid() { + return nil, errors.New("cbor: invalid NaNConvertMode " + strconv.Itoa(int(opts.NaNConvert))) + } + if !opts.InfConvert.valid() { + return nil, errors.New("cbor: invalid InfConvertMode " + strconv.Itoa(int(opts.InfConvert))) + } + if !opts.BigIntConvert.valid() { + return nil, errors.New("cbor: invalid BigIntConvertMode " + strconv.Itoa(int(opts.BigIntConvert))) + } + if !opts.Time.valid() { + return nil, errors.New("cbor: invalid TimeMode " + strconv.Itoa(int(opts.Time))) + } + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + if !opts.NilContainers.valid() { + return nil, errors.New("cbor: invalid NilContainers " + strconv.Itoa(int(opts.NilContainers))) + } + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + if opts.TagsMd == TagsForbidden && opts.TimeTag == EncTagRequired { + return nil, errors.New("cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired") + } + if !opts.OmitEmpty.valid() { + return nil, errors.New("cbor: invalid OmitEmpty " + strconv.Itoa(int(opts.OmitEmpty))) + } + stringMajorType, err := opts.String.cborType() + if err != nil { + return nil, err + } + if !opts.FieldName.valid() { + return nil, errors.New("cbor: invalid FieldName " + strconv.Itoa(int(opts.FieldName))) + } + byteSliceLaterEncodingTag, err := opts.ByteSliceLaterFormat.encodingTag() + if err != nil { + return nil, err + } + if !opts.ByteArray.valid() { + return nil, errors.New("cbor: invalid ByteArray " + strconv.Itoa(int(opts.ByteArray))) + } + if !opts.BinaryMarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler))) + } + em := encMode{ + sort: opts.Sort, + shortestFloat: opts.ShortestFloat, + nanConvert: opts.NaNConvert, + infConvert: opts.InfConvert, + bigIntConvert: opts.BigIntConvert, + time: opts.Time, + timeTag: opts.TimeTag, + indefLength: opts.IndefLength, + nilContainers: opts.NilContainers, + tagsMd: opts.TagsMd, + omitEmpty: opts.OmitEmpty, + stringType: opts.String, + stringMajorType: stringMajorType, + fieldName: opts.FieldName, + byteSliceLaterFormat: opts.ByteSliceLaterFormat, + byteSliceLaterEncodingTag: byteSliceLaterEncodingTag, + byteArray: opts.ByteArray, + binaryMarshaler: opts.BinaryMarshaler, + } + return &em, nil +} + +// EncMode is the main interface for CBOR encoding. 
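+//
+// A minimal usage sketch (error handling elided for brevity):
+//
+//	em, _ := EncOptions{Sort: SortBytewiseLexical}.EncMode()
+//	b, _ := em.Marshal(map[string]int{"a": 1})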
+type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions +} + +// UserBufferEncMode is an interface for CBOR encoding, which extends EncMode by +// adding MarshalToBuffer to support user specified buffer rather than encoding +// into the built-in buffer pool. +type UserBufferEncMode interface { + EncMode + MarshalToBuffer(v interface{}, buf *bytes.Buffer) error + + // This private method is to prevent users implementing + // this interface and so future additions to it will + // not be breaking changes. + // See https://go.dev/blog/module-compatibility + unexport() +} + +type encMode struct { + tags tagProvider + sort SortMode + shortestFloat ShortestFloatMode + nanConvert NaNConvertMode + infConvert InfConvertMode + bigIntConvert BigIntConvertMode + time TimeMode + timeTag EncTagMode + indefLength IndefLengthMode + nilContainers NilContainersMode + tagsMd TagsMode + omitEmpty OmitEmptyMode + stringType StringMode + stringMajorType cborType + fieldName FieldNameMode + byteSliceLaterFormat ByteSliceLaterFormatMode + byteSliceLaterEncodingTag uint64 + byteArray ByteArrayMode + binaryMarshaler BinaryMarshalerMode +} + +var defaultEncMode, _ = EncOptions{}.encMode() + +// These four decoding modes are used by getMarshalerDecMode. +// maxNestedLevels, maxArrayElements, and maxMapPairs are +// set to max allowed limits to avoid rejecting Marshaler +// output that would have been the allowable output of a +// non-Marshaler object that exceeds default limits. +var ( + marshalerForbidIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsForbidden, + } + + marshalerAllowIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsForbidden, + } + + marshalerForbidIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsAllowed, + } + + marshalerAllowIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsAllowed, + } +) + +// getMarshalerDecMode returns one of four existing decoding modes +// which can be reused (safe for parallel use) for the purpose of +// checking if data returned by Marshaler is well-formed. +func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode { + switch { + case indefLength == IndefLengthAllowed && tagsMd == TagsAllowed: + return &marshalerAllowIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthAllowed && tagsMd == TagsForbidden: + return &marshalerAllowIndefLengthForbidTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsAllowed: + return &marshalerForbidIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsForbidden: + return &marshalerForbidIndefLengthForbidTagsDecMode + + default: + // This should never happen, unless we add new options to + // IndefLengthMode or TagsMode without updating this function. 
+ return &decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: indefLength, + tagsMd: tagsMd, + } + } +} + +// EncOptions returns user specified options used to create this EncMode. +func (em *encMode) EncOptions() EncOptions { + return EncOptions{ + Sort: em.sort, + ShortestFloat: em.shortestFloat, + NaNConvert: em.nanConvert, + InfConvert: em.infConvert, + BigIntConvert: em.bigIntConvert, + Time: em.time, + TimeTag: em.timeTag, + IndefLength: em.indefLength, + NilContainers: em.nilContainers, + TagsMd: em.tagsMd, + OmitEmpty: em.omitEmpty, + String: em.stringType, + FieldName: em.fieldName, + ByteSliceLaterFormat: em.byteSliceLaterFormat, + ByteArray: em.byteArray, + BinaryMarshaler: em.binaryMarshaler, + } +} + +func (em *encMode) unexport() {} + +func (em *encMode) encTagBytes(t reflect.Type) []byte { + if em.tags != nil { + if tagItem := em.tags.getTagItemFromType(t); tagItem != nil { + return tagItem.cborTagNum + } + } + return nil +} + +// Marshal returns the CBOR encoding of v using em encoding mode. +// +// See the documentation for Marshal for details. +func (em *encMode) Marshal(v interface{}) ([]byte, error) { + e := getEncodeBuffer() + + if err := encode(e, em, reflect.ValueOf(v)); err != nil { + putEncodeBuffer(e) + return nil, err + } + + buf := make([]byte, e.Len()) + copy(buf, e.Bytes()) + + putEncodeBuffer(e) + return buf, nil +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses em encoding mode. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + if buf == nil { + return fmt.Errorf("cbor: encoding buffer provided by user is nil") + } + return encode(buf, em, reflect.ValueOf(v)) +} + +// NewEncoder returns a new encoder that writes to w using em EncMode. +func (em *encMode) NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, em: em} +} + +// encodeBufferPool caches unused bytes.Buffer objects for later reuse. 
+var encodeBufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(32) // TODO: make this configurable + return e + }, +} + +func getEncodeBuffer() *bytes.Buffer { + return encodeBufferPool.Get().(*bytes.Buffer) +} + +func putEncodeBuffer(e *bytes.Buffer) { + e.Reset() + encodeBufferPool.Put(e) +} + +type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error +type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error) + +func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if !v.IsValid() { + // v is zero value + e.Write(cborNil) + return nil + } + vt := v.Type() + f, _ := getEncodeFunc(vt) + if f == nil { + return &UnsupportedTypeError{vt} + } + + return f(e, em, v) +} + +func encodeBool(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + b := cborFalse + if v.Bool() { + b = cborTrue + } + e.Write(b) + return nil +} + +func encodeInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + i := v.Int() + if i >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(i)) + return nil + } + i = i*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(i)) + return nil +} + +func encodeUint(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypePositiveInt), v.Uint()) + return nil +} + +func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + f64 := v.Float() + if math.IsNaN(f64) { + return encodeNaN(e, em, v) + } + if math.IsInf(f64, 0) { + return encodeInf(e, em, v) + } + fopt := em.shortestFloat + if v.Kind() == reflect.Float64 && (fopt == ShortestFloatNone || cannotFitFloat32(f64)) { + // Encode float64 + // Don't use encodeFloat64() because it cannot be inlined. + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | byte(additionalInformationAsFloat64) + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil + } + + f32 := float32(f64) + if fopt == ShortestFloat16 { + var f16 float16.Float16 + p := float16.PrecisionFromfloat32(f32) + if p == float16.PrecisionExact { + // Roundtrip float32->float16->float32 test isn't needed. + f16 = float16.Fromfloat32(f32) + } else if p == float16.PrecisionUnknown { + // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16. + f16 = float16.Fromfloat32(f32) + if f16.Float32() == f32 { + p = float16.PrecisionExact + } + } + if p == float16.PrecisionExact { + // Encode float16 + // Don't use encodeFloat16() because it cannot be inlined. + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil + } + } + + // Encode float32 + // Don't use encodeFloat32() because it cannot be inlined. 
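+	// The head written below is 5 bytes: the initial byte 0xfa (major type 7,
+	// additional information 26) followed by the 4-byte big-endian IEEE 754
+	// payload, e.g. 5555.5 encodes as 0xfa45ad9c00 (the example in RFC 7049bis).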
+ const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeInf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + f64 := v.Float() + switch em.infConvert { + case InfConvertReject: + return &UnsupportedValueError{msg: "floating-point infinity"} + + case InfConvertFloat16: + if f64 > 0 { + e.Write(cborPositiveInfinity) + } else { + e.Write(cborNegativeInfinity) + } + return nil + } + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, f64) + } + return encodeFloat32(e, float32(f64)) +} + +func encodeNaN(e *bytes.Buffer, em *encMode, v reflect.Value) error { + switch em.nanConvert { + case NaNConvert7e00: + e.Write(cborNaN) + return nil + + case NaNConvertNone: + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, v.Float()) + } + f32 := float32NaNFromReflectValue(v) + return encodeFloat32(e, f32) + + case NaNConvertReject: + return &UnsupportedValueError{msg: "floating-point NaN"} + + default: // NaNConvertPreserveSignal, NaNConvertQuiet + if v.Kind() == reflect.Float64 { + f64 := v.Float() + f64bits := math.Float64bits(f64) + if em.nanConvert == NaNConvertQuiet && f64bits&(1<<51) == 0 { + f64bits |= 1 << 51 // Set quiet bit = 1 + f64 = math.Float64frombits(f64bits) + } + // The lower 29 bits are dropped when converting from float64 to float32. + if f64bits&0x1fffffff != 0 { + // Encode NaN as float64 because dropped coef bits from float64 to float32 are not all 0s. + return encodeFloat64(e, f64) + } + // Create float32 from float64 manually because float32(f64) always turns on NaN's quiet bits. + sign := uint32(f64bits>>32) & (1 << 31) + exp := uint32(0x7f800000) + coef := uint32((f64bits & 0xfffffffffffff) >> 29) + f32bits := sign | exp | coef + f32 := math.Float32frombits(f32bits) + // The lower 13 bits are dropped when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + // Encode NaN as float16 + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. + return encodeFloat16(e, f16) + } + + f32 := float32NaNFromReflectValue(v) + f32bits := math.Float32bits(f32) + if em.nanConvert == NaNConvertQuiet && f32bits&(1<<22) == 0 { + f32bits |= 1 << 22 // Set quiet bit = 1 + f32 = math.Float32frombits(f32bits) + } + // The lower 13 bits are dropped coef bits when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. 
+ return encodeFloat16(e, f16) + } +} + +func encodeFloat16(e *bytes.Buffer, f16 float16.Float16) error { + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat32(e *bytes.Buffer, f32 float32) error { + const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat64(e *bytes.Buffer, f64 float64) error { + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat64 + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil +} + +func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + vk := v.Kind() + if vk == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if vk == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && em.byteSliceLaterEncodingTag != 0 { + encodeHead(e, byte(cborTypeTag), em.byteSliceLaterEncodingTag) + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + slen := v.Len() + if slen == 0 { + return e.WriteByte(byte(cborTypeByteString)) + } + encodeHead(e, byte(cborTypeByteString), uint64(slen)) + if vk == reflect.Array { + for i := 0; i < slen; i++ { + e.WriteByte(byte(v.Index(i).Uint())) + } + return nil + } + e.Write(v.Bytes()) + return nil +} + +func encodeString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + s := v.String() + encodeHead(e, byte(em.stringMajorType), uint64(len(s))) + e.WriteString(s) + return nil +} + +type arrayEncodeFunc struct { + f encodeFunc +} + +func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.byteArray == ByteArrayToByteSlice && v.Type().Elem().Kind() == reflect.Uint8 { + return encodeByteString(e, em, v) + } + if v.Kind() == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + alen := v.Len() + if alen == 0 { + return e.WriteByte(byte(cborTypeArray)) + } + encodeHead(e, byte(cborTypeArray), uint64(alen)) + for i := 0; i < alen; i++ { + if err := ae.f(e, em, v.Index(i)); err != nil { + return err + } + } + return nil +} + +// encodeKeyValueFunc encodes key/value pairs in map (v). +// If kvs is provided (having the same length as v), length of encoded key and value are stored in kvs. +// kvs is used for canonical encoding of map. 
+type encodeKeyValueFunc func(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error + +type mapEncodeFunc struct { + e encodeKeyValueFunc +} + +func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + mlen := v.Len() + if mlen == 0 { + return e.WriteByte(byte(cborTypeMap)) + } + + encodeHead(e, byte(cborTypeMap), uint64(mlen)) + if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 { + return me.e(e, em, v, nil) + } + + kvsp := getKeyValues(v.Len()) // for sorting keys + defer putKeyValues(kvsp) + kvs := *kvsp + + kvBeginOffset := e.Len() + if err := me.e(e, em, v, kvs); err != nil { + return err + } + kvTotalLen := e.Len() - kvBeginOffset + + // Use the capacity at the tail of the encode buffer as a staging area to rearrange the + // encoded pairs into sorted order. + e.Grow(kvTotalLen) + tmp := e.Bytes()[e.Len() : e.Len()+kvTotalLen] // Can use e.AvailableBuffer() in Go 1.21+. + dst := e.Bytes()[kvBeginOffset:] + + if em.sort == SortBytewiseLexical { + sort.Sort(&bytewiseKeyValueSorter{kvs: kvs, data: dst}) + } else { + sort.Sort(&lengthFirstKeyValueSorter{kvs: kvs, data: dst}) + } + + // This is where the encoded bytes are actually rearranged in the output buffer to reflect + // the desired order. + sortedOffset := 0 + for _, kv := range kvs { + copy(tmp[sortedOffset:], dst[kv.offset:kv.nextOffset]) + sortedOffset += kv.nextOffset - kv.offset + } + copy(dst, tmp[:kvTotalLen]) + + return nil + +} + +// keyValue is the position of an encoded pair in a buffer. All offsets are zero-based and relative +// to the first byte of the first encoded pair. +type keyValue struct { + offset int + valueOffset int + nextOffset int +} + +type bytewiseKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *bytewiseKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *bytewiseKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *bytewiseKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +type lengthFirstKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *lengthFirstKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *lengthFirstKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *lengthFirstKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 { + return keyLengthDifference < 0 + } + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +var keyValuePool = sync.Pool{} + +func getKeyValues(length int) *[]keyValue { + v := keyValuePool.Get() + if v == nil { + y := make([]keyValue, length) + return &y + } + x := v.(*[]keyValue) + if cap(*x) >= length { + *x = (*x)[:length] + return x + } + // []keyValue from the pool does not have enough capacity. + // Return it back to the pool and create a new one. 
+ keyValuePool.Put(x) + y := make([]keyValue, length) + return &y +} + +func putKeyValues(x *[]keyValue) { + *x = (*x)[:0] + keyValuePool.Put(x) +} + +func encodeStructToArray(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + flds := structType.fields + + encodeHead(e, byte(cborTypeArray), uint64(len(flds))) + for i := 0; i < len(flds); i++ { + f := flds[i] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Write CBOR nil for null pointer to embedded struct + e.Write(cborNil) + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + } + return nil +} + +func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + flds := structType.getFields(em) + + start := 0 + if em.sort == SortFastShuffle && len(flds) > 0 { + start = rand.Intn(len(flds)) //nolint:gosec // Don't need a CSPRNG for deck cutting. + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + // Encode head with struct field count. + // Head is rewritten later if actual encoded field count is different from struct field count. + encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds))) + + kvbegin := e.Len() + kvcount := 0 + for offset := 0; offset < len(flds); offset++ { + f := flds[(start+offset)%len(flds)] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + if f.omitEmpty { + empty, err := f.ief(em, fv) + if err != nil { + return err + } + if empty { + continue + } + } + + if !f.keyAsInt && em.fieldName == FieldNameToByteString { + e.Write(f.cborNameByteString) + } else { // int or text string + e.Write(f.cborName) + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + + kvcount++ + } + + if len(flds) == kvcount { + // Encoded element count in head is the same as actual element count. + return nil + } + + // Overwrite the bytes that were reserved for the head before encoding the map entries. + var actualHeadLen int + { + headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin]) + actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount)) + } + + if actualHeadLen == encodedHeadLen { + // The bytes reserved for the encoded head were exactly the right size, so the + // encoded entries are already in their final positions. + return nil + } + + // We reserved more bytes than needed for the encoded head, based on the number of fields + // encoded. The encoded entries are offset to the right by the number of excess reserved + // bytes. Shift the entries left to remove the gap. 
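+	// E.g. a struct with 24 fields reserves a 2-byte head (0xb8 0x18); if
+	// omitempty drops the count to 23 or fewer, only a 1-byte head is written
+	// and every encoded entry must move left by the 1 excess byte.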
+ excessReservedBytes := encodedHeadLen - actualHeadLen + dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes] + src := e.Bytes()[kvbegin:e.Len()] + copy(dst, src) + + // After shifting, the excess bytes are at the end of the output buffer and they are + // garbage. + e.Truncate(e.Len() - excessReservedBytes) + return nil +} + +func encodeIntf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() { + e.Write(cborNil) + return nil + } + return encode(e, em, v.Elem()) +} + +func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { + t := v.Interface().(time.Time) + if t.IsZero() { + e.Write(cborNil) // Even if tag is required, encode as CBOR null. + return nil + } + if em.timeTag == EncTagRequired { + tagNumber := 1 + if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano { + tagNumber = 0 + } + encodeHead(e, byte(cborTypeTag), uint64(tagNumber)) + } + switch em.time { + case TimeUnix: + secs := t.Unix() + return encodeInt(e, em, reflect.ValueOf(secs)) + + case TimeUnixMicro: + t = t.UTC().Round(time.Microsecond) + f := float64(t.UnixNano()) / 1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeUnixDynamic: + t = t.UTC().Round(time.Microsecond) + secs, nsecs := t.Unix(), uint64(t.Nanosecond()) + if nsecs == 0 { + return encodeInt(e, em, reflect.ValueOf(secs)) + } + f := float64(secs) + float64(nsecs)/1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeRFC3339: + s := t.Format(time.RFC3339) + return encodeString(e, em, reflect.ValueOf(s)) + + default: // TimeRFC3339Nano + s := t.Format(time.RFC3339Nano) + return encodeString(e, em, reflect.ValueOf(s)) + } +} + +func encodeBigInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.bigIntConvert == BigIntConvertReject { + return &UnsupportedTypeError{Type: typeBigInt} + } + + vbi := v.Interface().(big.Int) + sign := vbi.Sign() + bi := new(big.Int).SetBytes(vbi.Bytes()) // bi is absolute value of v + if sign < 0 { + // For negative number, convert to CBOR encoded number (-v-1). 
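+		// E.g. big.NewInt(-1) has absolute value 1, which becomes 0 here and
+		// is later encoded as 0x20 (major type 1 with argument 0).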
+ bi.Sub(bi, big.NewInt(1)) + } + + if em.bigIntConvert == BigIntConvertShortest { + if bi.IsUint64() { + if sign >= 0 { + // Encode as CBOR pos int (major type 0) + encodeHead(e, byte(cborTypePositiveInt), bi.Uint64()) + return nil + } + // Encode as CBOR neg int (major type 1) + encodeHead(e, byte(cborTypeNegativeInt), bi.Uint64()) + return nil + } + } + + tagNum := 2 + if sign < 0 { + tagNum = 3 + } + // Write tag number + encodeHead(e, byte(cborTypeTag), uint64(tagNum)) + // Write bignum byte string + b := bi.Bytes() + encodeHead(e, byte(cborTypeByteString), uint64(len(b))) + e.Write(b) + return nil +} + +type binaryMarshalerEncoder struct { + alternateEncode encodeFunc + alternateIsEmpty isEmptyFunc +} + +func (bme binaryMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateEncode(e, em, v) + } + + vt := v.Type() + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(vt) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return err + } + if b := em.encTagBytes(vt); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypeByteString), uint64(len(data))) + e.Write(data) + return nil +} + +func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateIsEmpty(em, v) + } + + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return false, err + } + return len(data) == 0, nil +} + +func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden && v.Type() == typeRawTag { + return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden") + } + m, ok := v.Interface().(Marshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(Marshaler) + } + data, err := m.MarshalCBOR() + if err != nil { + return err + } + + // Verify returned CBOR data item from MarshalCBOR() is well-formed and passes tag validity for builtin tags 0-3. + d := decoder{data: data, dm: getMarshalerDecMode(em.indefLength, em.tagsMd)} + err = d.wellformed(false, true) + if err != nil { + return &MarshalerError{typ: v.Type(), err: err} + } + + e.Write(data) + return nil +} + +func encodeTag(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden { + return errors.New("cbor: cannot encode cbor.Tag when TagsMd is TagsForbidden") + } + + t := v.Interface().(Tag) + + if t.Number == 0 && t.Content == nil { + // Marshal uninitialized cbor.Tag + e.Write(cborNil) + return nil + } + + // Marshal tag number + encodeHead(e, byte(cborTypeTag), t.Number) + + vem := *em // shallow copy + + // For built-in tags, disable settings that may introduce tag validity errors when + // marshaling certain Content values. + switch t.Number { + case tagNumRFC3339Time: + vem.stringType = StringToTextString + vem.stringMajorType = cborTypeTextString + case tagNumUnsignedBignum, tagNumNegativeBignum: + vem.byteSliceLaterFormat = ByteSliceLaterFormatNone + vem.byteSliceLaterEncodingTag = 0 + } + + // Marshal tag content + return encode(e, &vem, reflect.ValueOf(t.Content)) +} + +// encodeHead writes CBOR head of specified type t and returns number of bytes written. 
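+// For example, encodeHead(e, byte(cborTypeByteString), 16) writes the single
+// byte 0x50, while encodeHead(e, byte(cborTypeByteString), 500) writes the
+// 3-byte head 0x59 0x01 0xf4.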
+func encodeHead(e *bytes.Buffer, t byte, n uint64) int { + if n <= maxAdditionalInformationWithoutArgument { + const headSize = 1 + e.WriteByte(t | byte(n)) + return headSize + } + + if n <= math.MaxUint8 { + const headSize = 2 + scratch := [headSize]byte{ + t | byte(additionalInformationWith1ByteArgument), + byte(n), + } + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint16 { + const headSize = 3 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith2ByteArgument) + binary.BigEndian.PutUint16(scratch[1:], uint16(n)) + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint32 { + const headSize = 5 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith4ByteArgument) + binary.BigEndian.PutUint32(scratch[1:], uint32(n)) + e.Write(scratch[:]) + return headSize + } + + const headSize = 9 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith8ByteArgument) + binary.BigEndian.PutUint64(scratch[1:], n) + e.Write(scratch[:]) + return headSize +} + +var ( + typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() + typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + typeRawMessage = reflect.TypeOf(RawMessage(nil)) + typeByteString = reflect.TypeOf(ByteString("")) +) + +func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) { + k := t.Kind() + if k == reflect.Ptr { + return getEncodeIndirectValueFunc(t), isEmptyPtr + } + switch t { + case typeSimpleValue: + return encodeMarshalerType, isEmptyUint + + case typeTag: + return encodeTag, alwaysNotEmpty + + case typeTime: + return encodeTime, alwaysNotEmpty + + case typeBigInt: + return encodeBigInt, alwaysNotEmpty + + case typeRawMessage: + return encodeMarshalerType, isEmptySlice + + case typeByteString: + return encodeMarshalerType, isEmptyString + } + if reflect.PtrTo(t).Implements(typeMarshaler) { + return encodeMarshalerType, alwaysNotEmpty + } + if reflect.PtrTo(t).Implements(typeBinaryMarshaler) { + defer func() { + // capture encoding method used for modes that disable BinaryMarshaler + bme := binaryMarshalerEncoder{ + alternateEncode: ef, + alternateIsEmpty: ief, + } + ef = bme.encode + ief = bme.isEmpty + }() + } + switch k { + case reflect.Bool: + return encodeBool, isEmptyBool + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return encodeInt, isEmptyInt + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return encodeUint, isEmptyUint + + case reflect.Float32, reflect.Float64: + return encodeFloat, isEmptyFloat + + case reflect.String: + return encodeString, isEmptyString + + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return encodeByteString, isEmptySlice + } + fallthrough + + case reflect.Array: + f, _ := getEncodeFunc(t.Elem()) + if f == nil { + return nil, nil + } + return arrayEncodeFunc{f: f}.encode, isEmptySlice + + case reflect.Map: + f := getEncodeMapFunc(t) + if f == nil { + return nil, nil + } + return f, isEmptyMap + + case reflect.Struct: + // Get struct's special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + if hasToArrayOption(tag) { + return encodeStructToArray, isEmptyStruct + } + } + } + return encodeStruct, isEmptyStruct + + case reflect.Interface: + return encodeIntf, isEmptyIntf + } + return nil, nil +} + +func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + f, _ := 
getEncodeFunc(t) + if f == nil { + return nil + } + return func(e *bytes.Buffer, em *encMode, v reflect.Value) error { + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + if v.Kind() == reflect.Ptr && v.IsNil() { + e.Write(cborNil) + return nil + } + return f(e, em, v) + } +} + +func alwaysNotEmpty(_ *encMode, _ reflect.Value) (empty bool, err error) { + return false, nil +} + +func isEmptyBool(_ *encMode, v reflect.Value) (bool, error) { + return !v.Bool(), nil +} + +func isEmptyInt(_ *encMode, v reflect.Value) (bool, error) { + return v.Int() == 0, nil +} + +func isEmptyUint(_ *encMode, v reflect.Value) (bool, error) { + return v.Uint() == 0, nil +} + +func isEmptyFloat(_ *encMode, v reflect.Value) (bool, error) { + return v.Float() == 0.0, nil +} + +func isEmptyString(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptySlice(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyMap(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyPtr(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyIntf(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyStruct(em *encMode, v reflect.Value) (bool, error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return false, err + } + + if em.omitEmpty == OmitEmptyGoValue { + return false, nil + } + + if structType.toArray { + return len(structType.fields) == 0, nil + } + + if len(structType.fields) > len(structType.omitEmptyFieldsIdx) { + return false, nil + } + + for _, i := range structType.omitEmptyFieldsIdx { + f := structType.fields[i] + + // Get field value + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + empty, err := f.ief(em, fv) + if err != nil { + return false, err + } + if !empty { + return false, nil + } + } + return true, nil +} + +func cannotFitFloat32(f64 float64) bool { + f32 := float32(f64) + return float64(f32) != f64 +} + +// float32NaNFromReflectValue extracts float32 NaN from reflect.Value while preserving NaN's quiet bit. +func float32NaNFromReflectValue(v reflect.Value) float32 { + // Keith Randall's workaround for issue https://github.com/golang/go/issues/36400 + p := reflect.New(v.Type()) + p.Elem().Set(v) + f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32) + return f32 +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map.go b/vendor/github.com/fxamacker/cbor/v2/encode_map.go new file mode 100644 index 00000000..8b4b4bbc --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map.go @@ -0,0 +1,94 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build go1.20 + +package cbor + +import ( + "bytes" + "reflect" + "sync" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc + kpool, vpool sync.Pool +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + iterk := me.kpool.Get().(*reflect.Value) + defer func() { + iterk.SetZero() + me.kpool.Put(iterk) + }() + iterv := me.vpool.Get().(*reflect.Value) + defer func() { + iterv.SetZero() + me.vpool.Put(iterv) + }() + + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + if err := me.kf(e, em, *iterk); err != nil { + return err + } + if err := me.ef(e, em, *iterv); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + offset := e.Len() + if err := me.kf(e, em, *iterk); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, *iterv); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{ + kf: kf, + ef: ef, + kpool: sync.Pool{ + New: func() interface{} { + rk := reflect.New(t.Key()).Elem() + return &rk + }, + }, + vpool: sync.Pool{ + New: func() interface{} { + rv := reflect.New(t.Elem()).Elem() + return &rv + }, + }, + } + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go new file mode 100644 index 00000000..31c39336 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go @@ -0,0 +1,60 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build !go1.20 + +package cbor + +import ( + "bytes" + "reflect" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + offset := e.Len() + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef} + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go new file mode 100644 index 00000000..de175cee --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go @@ -0,0 +1,69 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" +) + +// SimpleValue represents CBOR simple value. +// CBOR simple value is: +// - an extension point like CBOR tag. +// - a subset of CBOR major type 7 that isn't floating-point. +// - "identified by a number between 0 and 255, but distinct from that number itself". +// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key. +// +// CBOR simple values identified by 20..23 are: "false", "true" , "null", and "undefined". +// Other CBOR simple values are currently unassigned/reserved by IANA. +type SimpleValue uint8 + +var ( + typeSimpleValue = reflect.TypeOf(SimpleValue(0)) +) + +// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7). +func (sv SimpleValue) MarshalCBOR() ([]byte, error) { + // RFC 8949 3.3. Floating-Point Numbers and Values with No Content says: + // "An encoder MUST NOT issue two-byte sequences that start with 0xf8 + // (major type 7, additional information 24) and continue with a byte + // less than 0x20 (32 decimal). Such sequences are not well-formed. + // (This implies that an encoder cannot encode false, true, null, or + // undefined in two-byte sequences and that only the one-byte variants + // of these are well-formed; more generally speaking, each simple value + // only has a single representation variant)." + + switch { + case sv <= maxSimpleValueInAdditionalInformation: + return []byte{byte(cborTypePrimitives) | byte(sv)}, nil + + case sv >= minSimpleValueIn1ByteArgument: + return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil + + default: + return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)} + } +} + +// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue. 
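+//
+// A usage sketch (0xf5 is the one-byte encoding of simple value 21, i.e. CBOR true):
+//
+//	var sv SimpleValue
+//	err := sv.UnmarshalCBOR([]byte{0xf5}) // on success sv == SimpleValue(21)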
+func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
+	if sv == nil {
+		return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
+	}
+
+	d := decoder{data: data, dm: defaultDecMode}
+
+	typ, ai, val := d.getHead()
+
+	if typ != cborTypePrimitives {
+		return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"}
+	}
+	if ai > additionalInformationWith1ByteArgument {
+		return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"}
+	}
+
+	// It is safe to cast val to uint8 here because
+	// - data is already verified to be well-formed CBOR simple value and
+	// - val is <= math.MaxUint8.
+	*sv = SimpleValue(val)
+	return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go
new file mode 100644
index 00000000..507ab6c1
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/stream.go
@@ -0,0 +1,277 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"reflect"
+)
+
+// Decoder reads and decodes CBOR values from io.Reader.
+type Decoder struct {
+	r         io.Reader
+	d         decoder
+	buf       []byte
+	off       int // next read offset in buf
+	bytesRead int
+}
+
+// NewDecoder returns a new decoder that reads and decodes from r using
+// the default decoding options.
+func NewDecoder(r io.Reader) *Decoder {
+	return defaultDecMode.NewDecoder(r)
+}
+
+// Decode reads CBOR value and decodes it into the value pointed to by v.
+func (dec *Decoder) Decode(v interface{}) error {
+	_, err := dec.readNext()
+	if err != nil {
+		// Return validation error or read error.
+		return err
+	}
+
+	dec.d.reset(dec.buf[dec.off:])
+	err = dec.d.value(v)
+
+	// Increment dec.off even if decoding err is not nil because
+	// dec.d.off points to the next CBOR data item if current
+	// CBOR data item is valid but failed to be decoded into v.
+	// This allows next CBOR data item to be decoded in next
+	// call to this function.
+	dec.off += dec.d.off
+	dec.bytesRead += dec.d.off
+
+	return err
+}
+
+// Skip skips to the next CBOR data item (if there is any),
+// otherwise it returns an error such as io.EOF, io.ErrUnexpectedEOF, etc.
+func (dec *Decoder) Skip() error {
+	n, err := dec.readNext()
+	if err != nil {
+		// Return validation error or read error.
+		return err
+	}
+
+	dec.off += n
+	dec.bytesRead += n
+	return nil
+}
+
+// NumBytesRead returns the number of bytes read.
+func (dec *Decoder) NumBytesRead() int {
+	return dec.bytesRead
+}
+
+// Buffered returns a reader for data remaining in Decoder's buffer.
+// Returned reader is valid until the next call to Decode or Skip.
+func (dec *Decoder) Buffered() io.Reader {
+	return bytes.NewReader(dec.buf[dec.off:])
+}
+
+// readNext() reads next CBOR data item from Reader to buffer.
+// It returns the size of next CBOR data item.
+// It also returns validation error or read error if any.
+func (dec *Decoder) readNext() (int, error) {
+	var readErr error
+	var validErr error
+
+	for {
+		// Process any unread data in dec.buf.
+		if dec.off < len(dec.buf) {
+			dec.d.reset(dec.buf[dec.off:])
+			off := dec.off // Save offset before data validation
+			validErr = dec.d.wellformed(true, false)
+			dec.off = off // Restore offset
+
+			if validErr == nil {
+				return dec.d.off, nil
+			}
+
+			if validErr != io.ErrUnexpectedEOF {
+				return 0, validErr
+			}
+
+			// Process last read error on io.ErrUnexpectedEOF.
+ if readErr != nil { + if readErr == io.EOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + return 0, readErr + } + } + + // More data is needed and there was no read error. + var n int + for n == 0 { + n, readErr = dec.read() + if n == 0 && readErr != nil { + // No more data can be read and read error is encountered. + // At this point, validErr is either nil or io.ErrUnexpectedEOF. + if readErr == io.EOF { + if validErr == io.ErrUnexpectedEOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + } + return 0, readErr + } + } + + // At this point, dec.buf contains new data from last read (n > 0). + } +} + +// read() reads data from Reader to buffer. +// It returns number of bytes read and any read error encountered. +// Postconditions: +// - dec.buf contains previously unread data and new data. +// - dec.off is 0. +func (dec *Decoder) read() (int, error) { + // Grow buf if needed. + const minRead = 512 + if cap(dec.buf)-len(dec.buf)+dec.off < minRead { + oldUnreadBuf := dec.buf[dec.off:] + dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead) + dec.overwriteBuf(oldUnreadBuf) + } + + // Copy unread data over read data and reset off to 0. + if dec.off > 0 { + dec.overwriteBuf(dec.buf[dec.off:]) + } + + // Read from reader and reslice buf. + n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)]) + dec.buf = dec.buf[0 : len(dec.buf)+n] + return n, err +} + +func (dec *Decoder) overwriteBuf(newBuf []byte) { + n := copy(dec.buf, newBuf) + dec.buf = dec.buf[:n] + dec.off = 0 +} + +// Encoder writes CBOR values to io.Writer. +type Encoder struct { + w io.Writer + em *encMode + indefTypes []cborType +} + +// NewEncoder returns a new encoder that writes to w using the default encoding options. +func NewEncoder(w io.Writer) *Encoder { + return defaultEncMode.NewEncoder(w) +} + +// Encode writes the CBOR encoding of v. +func (enc *Encoder) Encode(v interface{}) error { + if len(enc.indefTypes) > 0 && v != nil { + indefType := enc.indefTypes[len(enc.indefTypes)-1] + if indefType == cborTypeTextString { + k := reflect.TypeOf(v).Kind() + if k != reflect.String { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string") + } + } else if indefType == cborTypeByteString { + t := reflect.TypeOf(v) + k := t.Kind() + if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string") + } + } + } + + buf := getEncodeBuffer() + + err := encode(buf, enc.em, reflect.ValueOf(v)) + if err == nil { + _, err = enc.w.Write(buf.Bytes()) + } + + putEncodeBuffer(buf) + return err +} + +// StartIndefiniteByteString starts byte string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteByteString() error { + return enc.startIndefinite(cborTypeByteString) +} + +// StartIndefiniteTextString starts text string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length text strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteTextString() error { + return enc.startIndefinite(cborTypeTextString) +} + +// StartIndefiniteArray starts array encoding of indefinite length. 
+// Subsequent calls of (*Encoder).Encode() encodes elements of the array
+// until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteArray() error {
+	return enc.startIndefinite(cborTypeArray)
+}
+
+// StartIndefiniteMap starts map encoding of indefinite length.
+// Subsequent calls of (*Encoder).Encode() encodes elements of the map
+// until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteMap() error {
+	return enc.startIndefinite(cborTypeMap)
+}
+
+// EndIndefinite closes last opened indefinite length value.
+func (enc *Encoder) EndIndefinite() error {
+	if len(enc.indefTypes) == 0 {
+		return errors.New("cbor: cannot encode \"break\" code outside indefinite length values")
+	}
+	_, err := enc.w.Write([]byte{cborBreakFlag})
+	if err == nil {
+		enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1]
+	}
+	return err
+}
+
+var cborIndefHeader = map[cborType][]byte{
+	cborTypeByteString: {cborByteStringWithIndefiniteLengthHead},
+	cborTypeTextString: {cborTextStringWithIndefiniteLengthHead},
+	cborTypeArray:      {cborArrayWithIndefiniteLengthHead},
+	cborTypeMap:        {cborMapWithIndefiniteLengthHead},
+}
+
+func (enc *Encoder) startIndefinite(typ cborType) error {
+	if enc.em.indefLength == IndefLengthForbidden {
+		return &IndefiniteLengthError{typ}
+	}
+	_, err := enc.w.Write(cborIndefHeader[typ])
+	if err == nil {
+		enc.indefTypes = append(enc.indefTypes, typ)
+	}
+	return err
+}
+
+// RawMessage is a raw encoded CBOR value.
+type RawMessage []byte
+
+// MarshalCBOR returns m or CBOR nil if m is nil.
+func (m RawMessage) MarshalCBOR() ([]byte, error) {
+	if len(m) == 0 {
+		return cborNil, nil
+	}
+	return m, nil
+}
+
+// UnmarshalCBOR creates a copy of data and saves to *m.
+func (m *RawMessage) UnmarshalCBOR(data []byte) error {
+	if m == nil {
+		return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer")
+	}
+	*m = append((*m)[0:0], data...)
+	return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go
new file mode 100644
index 00000000..81228acf
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go
@@ -0,0 +1,260 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+	"reflect"
+	"sort"
+	"strings"
+)
+
+type field struct {
+	name               string
+	nameAsInt          int64 // used by the decoder to match field name with CBOR int
+	cborName           []byte
+	cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3
+	idx                []int
+	typ                reflect.Type
+	ef                 encodeFunc
+	ief                isEmptyFunc
+	typInfo            *typeInfo // used by the decoder to reuse type info
+	tagged             bool      // used to choose dominant field (at the same level tagged fields dominate untagged fields)
+	omitEmpty          bool      // used to skip empty field
+	keyAsInt           bool      // used to encode/decode field name as int
+}
+
+type fields []*field
+
+// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth.
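+// For example, a field with idx [0] sorts before [0 1], which sorts before [1].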
+type indexFieldSorter struct { + fields fields +} + +func (x *indexFieldSorter) Len() int { + return len(x.fields) +} + +func (x *indexFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *indexFieldSorter) Less(i, j int) bool { + iIdx, jIdx := x.fields[i].idx, x.fields[j].idx + for k := 0; k < len(iIdx) && k < len(jIdx); k++ { + if iIdx[k] != jIdx[k] { + return iIdx[k] < jIdx[k] + } + } + return len(iIdx) <= len(jIdx) +} + +// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag. +type nameLevelAndTagFieldSorter struct { + fields fields +} + +func (x *nameLevelAndTagFieldSorter) Len() int { + return len(x.fields) +} + +func (x *nameLevelAndTagFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool { + fi, fj := x.fields[i], x.fields[j] + if fi.name != fj.name { + return fi.name < fj.name + } + if len(fi.idx) != len(fj.idx) { + return len(fi.idx) < len(fj.idx) + } + if fi.tagged != fj.tagged { + return fi.tagged + } + return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters. +} + +// getFields returns visible fields of struct type t following visibility rules for JSON encoding. +func getFields(t reflect.Type) (flds fields, structOptions string) { + // Get special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + structOptions = tag + } + } + + // nTypes contains next level anonymous fields' types and indexes + // (there can be multiple fields of the same type at the same level) + flds, nTypes := appendFields(t, nil, nil, nil) + + if len(nTypes) > 0 { + + var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes + vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels + + for len(nTypes) > 0 { + cTypes, nTypes = nTypes, nil + + for t, idx := range cTypes { + // If there are multiple anonymous fields of the same struct type at the same level, all are ignored. + if len(idx) > 1 { + continue + } + + // Anonymous field of the same type at deeper nested level is ignored. + if vTypes[t] { + continue + } + vTypes[t] = true + + flds, nTypes = appendFields(t, idx[0], flds, nTypes) + } + } + } + + sort.Sort(&nameLevelAndTagFieldSorter{flds}) + + // Keep visible fields. + j := 0 // index of next unique field + for i := 0; i < len(flds); { + name := flds[i].name + if i == len(flds)-1 || // last field + name != flds[i+1].name || // field i has unique field name + len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1 + (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not + flds[j] = flds[i] + j++ + } + + // Skip fields with the same field name. + for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive + } + } + if j != len(flds) { + flds = flds[:j] + } + + // Sort fields by field index + sort.Sort(&indexFieldSorter{flds}) + + return flds, structOptions +} + +// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes . 
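+// For example (illustrative, mirroring the tag parsing below), a field declared as
+//
+//	Name string `cbor:"n,omitempty"`
+//
+// is appended with name "n" and with its omitEmpty and tagged flags set.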
+func appendFields( + t reflect.Type, + idx []int, + flds fields, + nTypes map[reflect.Type][][]int, +) ( + _flds fields, + _nTypes map[reflect.Type][][]int, +) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + + if !isFieldExportable(f, ft.Kind()) { + continue + } + + tag := f.Tag.Get("cbor") + if tag == "" { + tag = f.Tag.Get("json") + } + if tag == "-" { + continue + } + + tagged := tag != "" + + // Parse field tag options + var tagFieldName string + var omitempty, keyasint bool + for j := 0; tag != ""; j++ { + var token string + idx := strings.IndexByte(tag, ',') + if idx == -1 { + token, tag = tag, "" + } else { + token, tag = tag[:idx], tag[idx+1:] + } + if j == 0 { + tagFieldName = token + } else { + switch token { + case "omitempty": + omitempty = true + case "keyasint": + keyasint = true + } + } + } + + fieldName := tagFieldName + if tagFieldName == "" { + fieldName = f.Name + } + + fIdx := make([]int, len(idx)+1) + copy(fIdx, idx) + fIdx[len(fIdx)-1] = i + + if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" { + flds = append(flds, &field{ + name: fieldName, + idx: fIdx, + typ: f.Type, + omitEmpty: omitempty, + keyAsInt: keyasint, + tagged: tagged}) + } else { + if nTypes == nil { + nTypes = make(map[reflect.Type][][]int) + } + nTypes[ft] = append(nTypes[ft], fIdx) + } + } + + return flds, nTypes +} + +// isFieldExportable returns true if f is an exportable (regular or anonymous) field or +// a nonexportable anonymous field of struct type. +// Nonexportable anonymous field of struct type can contain exportable fields. +func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam + exportable := f.PkgPath == "" + return exportable || (f.Anonymous && fk == reflect.Struct) +} + +type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error) + +// getFieldValue returns field value of struct v by index. When encountering null pointer +// to anonymous (embedded) struct field, f is called with the last traversed field value. +func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) { + fv = v + for i, n := range idx { + fv = fv.Field(n) + + if i < len(idx)-1 { + if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct { + if fv.IsNil() { + // Null pointer to embedded struct field + fv, err = f(fv) + if err != nil || !fv.IsValid() { + return fv, err + } + } + fv = fv.Elem() + } + } + } + return fv, nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/tag.go b/vendor/github.com/fxamacker/cbor/v2/tag.go new file mode 100644 index 00000000..5c4d2b7a --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/tag.go @@ -0,0 +1,299 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" + "sync" +) + +// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and +// unmarshaling of tag content is subject to any encode and decode options that would apply to +// enclosed data item if it were to appear outside of a tag. +type Tag struct { + Number uint64 + Content interface{} +} + +// RawTag represents CBOR tag data, including tag number and raw tag content. +// RawTag implements Unmarshaler and Marshaler interfaces. +type RawTag struct { + Number uint64 + Content RawMessage +} + +// UnmarshalCBOR sets *t with tag number and raw tag content copied from data. 
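+//
+// A hedged sketch (0xc2 0x41 0x01 is tag number 2 followed by a 1-byte byte string):
+//
+//	var rt RawTag
+//	err := rt.UnmarshalCBOR([]byte{0xc2, 0x41, 0x01}) // rt.Number == 2, rt.Content == RawMessage{0x41, 0x01}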
+func (t *RawTag) UnmarshalCBOR(data []byte) error { + if t == nil { + return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and undefined to cbor.RawTag is no-op. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Unmarshal tag number. + typ, _, num := d.getHead() + if typ != cborTypeTag { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()} + } + t.Number = num + + // Unmarshal tag content. + c := d.data[d.off:] + t.Content = make([]byte, len(c)) + copy(t.Content, c) + return nil +} + +// MarshalCBOR returns CBOR encoding of t. +func (t RawTag) MarshalCBOR() ([]byte, error) { + if t.Number == 0 && len(t.Content) == 0 { + // Marshal uninitialized cbor.RawTag + b := make([]byte, len(cborNil)) + copy(b, cborNil) + return b, nil + } + + e := getEncodeBuffer() + + encodeHead(e, byte(cborTypeTag), t.Number) + + content := t.Content + if len(content) == 0 { + content = cborNil + } + + buf := make([]byte, len(e.Bytes())+len(content)) + n := copy(buf, e.Bytes()) + copy(buf[n:], content) + + putEncodeBuffer(e) + return buf, nil +} + +// DecTagMode specifies how decoder handles tag number. +type DecTagMode int + +const ( + // DecTagIgnored makes decoder ignore tag number (skips if present). + DecTagIgnored DecTagMode = iota + + // DecTagOptional makes decoder verify tag number if it's present. + DecTagOptional + + // DecTagRequired makes decoder verify tag number and tag number must be present. + DecTagRequired + + maxDecTagMode +) + +func (dtm DecTagMode) valid() bool { + return dtm >= 0 && dtm < maxDecTagMode +} + +// EncTagMode specifies how encoder handles tag number. +type EncTagMode int + +const ( + // EncTagNone makes encoder not encode tag number. + EncTagNone EncTagMode = iota + + // EncTagRequired makes encoder encode tag number. + EncTagRequired + + maxEncTagMode +) + +func (etm EncTagMode) valid() bool { + return etm >= 0 && etm < maxEncTagMode +} + +// TagOptions specifies how encoder and decoder handle tag number. +type TagOptions struct { + DecTag DecTagMode + EncTag EncTagMode +} + +// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode +// to provide CBOR tag support. +type TagSet interface { + // Add adds given tag number(s), content type, and tag options to TagSet. + Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error + + // Remove removes given tag content type from TagSet. 
+ Remove(contentType reflect.Type) + + tagProvider +} + +type tagProvider interface { + getTagItemFromType(t reflect.Type) *tagItem + getTypeFromTagNum(num []uint64) reflect.Type +} + +type tagItem struct { + num []uint64 + cborTagNum []byte + contentType reflect.Type + opts TagOptions +} + +func (t *tagItem) equalTagNum(num []uint64) bool { + // Fast path to compare 1 tag number + if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] { + return true + } + + if len(t.num) != len(num) { + return false + } + + for i := 0; i < len(t.num); i++ { + if t.num[i] != num[i] { + return false + } + } + + return true +} + +type ( + tagSet map[reflect.Type]*tagItem + + syncTagSet struct { + sync.RWMutex + t tagSet + } +) + +func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem { + return t[typ] +} + +func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type { + for typ, tag := range t { + if tag.equalTagNum(num) { + return typ + } + } + return nil +} + +// NewTagSet returns TagSet (safe for concurrency). +func NewTagSet() TagSet { + return &syncTagSet{t: make(map[reflect.Type]*tagItem)} +} + +// Add adds given tag number(s), content type, and tag options to TagSet. +func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error { + if contentType == nil { + return errors.New("cbor: cannot add nil content type to TagSet") + } + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + tag, err := newTagItem(opts, contentType, num, nestedNum...) + if err != nil { + return err + } + t.Lock() + defer t.Unlock() + for typ, ti := range t.t { + if typ == contentType { + return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet") + } + if ti.equalTagNum(tag.num) { + return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num) + } + } + t.t[contentType] = tag + return nil +} + +// Remove removes given tag content type from TagSet. 
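+//
+// Usage sketch (myType is a hypothetical named type; tag number 100 is arbitrary):
+//
+//	ts := NewTagSet()
+//	_ = ts.Add(TagOptions{DecTag: DecTagRequired, EncTag: EncTagRequired}, reflect.TypeOf(myType{}), 100)
+//	ts.Remove(reflect.TypeOf(myType{}))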
+func (t *syncTagSet) Remove(contentType reflect.Type) { + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + t.Lock() + delete(t.t, contentType) + t.Unlock() +} + +func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem { + t.RLock() + ti := t.t[typ] + t.RUnlock() + return ti +} + +func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type { + t.RLock() + rt := t.t.getTypeFromTagNum(num) + t.RUnlock() + return rt +} + +func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) { + if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone { + return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet") + } + if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface { + return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String()) + } + if contentType == typeTime { + return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if contentType == typeBigInt { + return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically") + } + if contentType == typeTag { + return nil, errors.New("cbor: cannot add cbor.Tag to TagSet") + } + if contentType == typeRawTag { + return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet") + } + if num == 0 || num == 1 { + return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if num == 2 || num == 3 { + return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically") + } + if num == tagNumSelfDescribedCBOR { + return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically") + } + + te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType} + te.num = append(te.num, nestedNum...) + + // Cache encoded tag numbers + e := getEncodeBuffer() + for _, n := range te.num { + encodeHead(e, byte(cborTypeTag), n) + } + te.cborTagNum = make([]byte, e.Len()) + copy(te.cborTagNum, e.Bytes()) + putEncodeBuffer(e) + + return &te, nil +} + +var ( + typeTag = reflect.TypeOf(Tag{}) + typeRawTag = reflect.TypeOf(RawTag{}) +) + +// WrongTagError describes mismatch between CBOR tag and registered tag. +type WrongTagError struct { + RegisteredType reflect.Type + RegisteredTagNum []uint64 + TagNum []uint64 +} + +func (e *WrongTagError) Error() string { + return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum) +} diff --git a/vendor/github.com/fxamacker/cbor/v2/valid.go b/vendor/github.com/fxamacker/cbor/v2/valid.go new file mode 100644 index 00000000..b40793b9 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/valid.go @@ -0,0 +1,394 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding/binary" + "errors" + "io" + "math" + "strconv" + + "github.com/x448/float16" +) + +// SyntaxError is a description of a CBOR syntax error. +type SyntaxError struct { + msg string +} + +func (e *SyntaxError) Error() string { return e.msg } + +// SemanticError is a description of a CBOR semantic error. 
+type SemanticError struct { + msg string +} + +func (e *SemanticError) Error() string { return e.msg } + +// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags. +type MaxNestedLevelError struct { + maxNestedLevels int +} + +func (e *MaxNestedLevelError) Error() string { + return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels) +} + +// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays. +type MaxArrayElementsError struct { + maxArrayElements int +} + +func (e *MaxArrayElementsError) Error() string { + return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array" +} + +// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps. +type MaxMapPairsError struct { + maxMapPairs int +} + +func (e *MaxMapPairsError) Error() string { + return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map" +} + +// IndefiniteLengthError indicates found disallowed indefinite length items. +type IndefiniteLengthError struct { + t cborType +} + +func (e *IndefiniteLengthError) Error() string { + return "cbor: indefinite-length " + e.t.String() + " isn't allowed" +} + +// TagsMdError indicates found disallowed CBOR tags. +type TagsMdError struct { +} + +func (e *TagsMdError) Error() string { + return "cbor: CBOR tag isn't allowed" +} + +// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item. +type ExtraneousDataError struct { + numOfBytes int // number of bytes of extraneous data + index int // location of extraneous data +} + +func (e *ExtraneousDataError) Error() string { + return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index) +} + +// wellformed checks whether the CBOR data item is well-formed. +// allowExtraData indicates if extraneous data is allowed after the CBOR data item. +// - use allowExtraData = true when using Decoder.Decode() +// - use allowExtraData = false when using Unmarshal() +func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error { + if len(d.data) == d.off { + return io.EOF + } + _, err := d.wellformedInternal(0, checkBuiltinTags) + if err == nil { + if !allowExtraData && d.off != len(d.data) { + err = &ExtraneousDataError{len(d.data) - d.off, d.off} + } + } + return err +} + +// wellformedInternal checks data's well-formedness and returns max depth and error. 
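+// Depth only increases for arrays, maps, and tags; string items do not add nesting.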
+func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo + t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag() + if err != nil { + return 0, err + } + + switch t { + case cborTypeByteString, cborTypeTextString: + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteString(t, depth, checkBuiltinTags) + } + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow") + } + if len(d.data)-d.off < valInt { // valInt+off may overflow integer + return 0, io.ErrUnexpectedEOF + } + d.off += valInt + + case cborTypeArray, cborTypeMap: + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags) + } + + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow") + } + + if t == cborTypeArray { + if valInt > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if valInt > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + + count := 1 + if t == cborTypeMap { + count = 2 + } + maxDepth := depth + for j := 0; j < count; j++ { + for i := 0; i < valInt; i++ { + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt // Save max depth + } + } + } + depth = maxDepth + + case cborTypeTag: + if d.dm.tagsMd == TagsForbidden { + return 0, &TagsMdError{} + } + + tagNum := val + + // Scan nested tag numbers to avoid recursion. + for { + if len(d.data) == d.off { // Tag number must be followed by tag content. + return 0, io.ErrUnexpectedEOF + } + if checkBuiltinTags { + err = validBuiltinTag(tagNum, d.data[d.off]) + if err != nil { + return 0, err + } + } + if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) { + return 0, &UnacceptableDataItemError{ + CBORType: cborTypeTag.String(), + Message: "bignum", + } + } + if getType(d.data[d.off]) != cborTypeTag { + break + } + if _, _, tagNum, err = d.wellformedHead(); err != nil { + return 0, err + } + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + } + // Check tag content. + return d.wellformedInternal(depth, checkBuiltinTags) + } + + return depth, nil +} + +// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + // Peek ahead to get next type and indefinite length status. 
+ nt, ai := parseInitialByte(d.data[d.off]) + if t != nt { + return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()} + } + if additionalInformation(ai).isIndefiniteLength() { + return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"} + } + if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + } + return depth, nil +} + +// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + maxDepth := depth + i := 0 + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt + } + i++ + if t == cborTypeArray { + if i > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if i%2 == 0 && i/2 > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + } + if t == cborTypeMap && i%2 == 1 { + return 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return maxDepth, nil +} + +func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, + err error, +) { + t, ai, val, err = d.wellformedHead() + if err != nil { + return + } + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) { + dataLen := len(d.data) - d.off + if dataLen == 0 { + return 0, 0, 0, io.ErrUnexpectedEOF + } + + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + dataLen-- + + if ai <= maxAdditionalInformationWithoutArgument { + return t, ai, val, nil + } + + if ai == additionalInformationWith1ByteArgument { + const argumentSize = 1 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(d.data[d.off]) + d.off++ + if t == cborTypePrimitives && val < 32 { + return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()} + } + return t, ai, val, nil + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + if t == cborTypePrimitives { + if err 
:= d.acceptableFloat(math.Float64frombits(val)); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if additionalInformation(ai).isIndefiniteLength() { + switch t { + case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag: + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} + case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite(). + return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return t, ai, val, nil + } + + // ai == 28, 29, 30 + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} +} + +func (d *decoder) acceptableFloat(f float64) error { + switch { + case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point NaN", + } + case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point infinity", + } + } + return nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go index c0a6f692..9dd36e5a 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/parser.go +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -36,19 +36,21 @@ func NewParser(options ...ParserOption) *Parser { return p } -// Parse parses, validates, verifies the signature and returns the parsed token. -// keyFunc will receive the parsed token and should return the key for validating. +// Parse parses, validates, verifies the signature and returns the parsed token. keyFunc will +// receive the parsed token and should return the key for validating. func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) } -// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims -// interface. This provides default values which can be overridden and allows a caller to use their own type, rather -// than the default MapClaims implementation of Claims. +// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object +// implementing the Claims interface. This provides default values which can be overridden and +// allows a caller to use their own type, rather than the default MapClaims implementation of +// Claims. // -// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), -// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the -// proper memory for it before passing in the overall claims, otherwise you might run into a panic. +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such +// as RegisteredClaims), make sure that a) you either embed a non-pointer version of the claims or +// b) if you are using a pointer, allocate the proper memory for it before passing in the overall +// claims, otherwise you might run into a panic. 
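+//
+// A usage sketch (hmacSecret is a placeholder key, not part of this change):
+//
+//	token, err := NewParser().ParseWithClaims(tokenString, &RegisteredClaims{}, func(t *Token) (interface{}, error) {
+//		return hmacSecret, nil
+//	})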
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { token, parts, err := p.ParseUnverified(tokenString, claims) if err != nil { @@ -85,12 +87,17 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} } + // Perform validation + token.Signature = parts[2] + if err := token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorSignatureInvalid} + } + vErr := &ValidationError{} // Validate Claims if !p.SkipClaimsValidation { if err := token.Claims.Valid(); err != nil { - // If the Claims Valid returned an error, check if it is a validation error, // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set if e, ok := err.(*ValidationError); !ok { @@ -98,22 +105,14 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf } else { vErr = e } + return token, vErr } } - // Perform validation - token.Signature = parts[2] - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { - vErr.Inner = err - vErr.Errors |= ValidationErrorSignatureInvalid - } - - if vErr.valid() { - token.Valid = true - return token, nil - } + // No errors so far, token is valid. + token.Valid = true - return token, vErr + return token, nil } // ParseUnverified parses the token but doesn't validate the signature. diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go index 8c00e737..1b632e07 100644 --- a/vendor/github.com/golang/glog/glog.go +++ b/vendor/github.com/golang/glog/glog.go @@ -76,7 +76,7 @@ // -log_backtrace_at=gopherflakes.go:234 // A stack trace will be written to the Info log whenever execution // hits one of these statements. (Unlike with -vmodule, the ".go" -// must bepresent.) +// must be present.) // -v=0 // Enable V-leveled logging at the specified level. // -vmodule="" diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go index 8eb8b08c..2b478ae6 100644 --- a/vendor/github.com/golang/glog/glog_file.go +++ b/vendor/github.com/golang/glog/glog_file.go @@ -158,7 +158,10 @@ var sinks struct { func init() { // Register stderr first: that way if we crash during file-writing at least // the log will have gone somewhere. - logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr, &sinks.file) + if shouldRegisterStderrSink() { + logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr) + } + logsink.TextSinks = append(logsink.TextSinks, &sinks.file) sinks.file.flushChan = make(chan logsink.Severity, 1) go sinks.file.flushDaemon() diff --git a/vendor/github.com/golang/glog/glog_file_nonwindows.go b/vendor/github.com/golang/glog/glog_file_nonwindows.go index d5cdb793..a0089ba4 100644 --- a/vendor/github.com/golang/glog/glog_file_nonwindows.go +++ b/vendor/github.com/golang/glog/glog_file_nonwindows.go @@ -4,6 +4,13 @@ package glog import "os/user" +// shouldRegisterStderrSink determines whether we should register a log sink that writes to stderr. +// Today, this always returns true on non-Windows platforms, as it specifically checks for a +// condition that is only present on Windows. 
+func shouldRegisterStderrSink() bool { + return true +} + func lookupUser() string { if current, err := user.Current(); err == nil { return current.Username diff --git a/vendor/github.com/golang/glog/glog_file_windows.go b/vendor/github.com/golang/glog/glog_file_windows.go index a9e4f609..2f032e19 100644 --- a/vendor/github.com/golang/glog/glog_file_windows.go +++ b/vendor/github.com/golang/glog/glog_file_windows.go @@ -3,9 +3,22 @@ package glog import ( + "os" "syscall" ) +// shouldRegisterStderrSink determines whether we should register a log sink that writes to stderr. +// Today, this checks if stderr is "valid", in that it maps to a non-NULL Handle. +// Windows Services are spawned without Stdout and Stderr, so any attempt to use them equates to +// referencing an invalid file Handle. +// os.Stderr's FD is derived from a call to `syscall.GetStdHandle(syscall.STD_ERROR_HANDLE)`. +// Documentation[1] for the GetStdHandle function indicates the return value may be NULL if the +// application lacks the standard handle, so consider Stderr valid if its FD is non-NULL. +// [1]: https://learn.microsoft.com/en-us/windows/console/getstdhandle +func shouldRegisterStderrSink() bool { + return os.Stderr.Fd() != 0 +} + // This follows the logic in the standard library's user.Current() function, except // that it leaves out the potentially expensive calls required to look up the user's // display name in Active Directory. diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index 860bb304..8ce9d3cf 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -122,6 +122,7 @@ func (p *Profile) preEncode() { } p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + p.docURLX = addString(strings, p.DocURL) p.stringTable = make([]string, len(strings)) for s, i := range strings { @@ -156,6 +157,7 @@ func (p *Profile) encode(b *buffer) { encodeInt64Opt(b, 12, p.Period) encodeInt64s(b, 13, p.commentX) encodeInt64(b, 14, p.defaultSampleTypeX) + encodeInt64Opt(b, 15, p.docURLX) } var profileDecoder = []decoder{ @@ -237,6 +239,8 @@ var profileDecoder = []decoder{ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, // int64 defaultSampleType = 14 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, + // string doc_link = 15; + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) }, } // postDecode takes the unexported fields populated by decode (with @@ -384,6 +388,7 @@ func (p *Profile) postDecode() error { p.commentX = nil p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.DocURL, err = getString(p.stringTable, &p.docURLX, err) p.stringTable = nil return err } diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index eee0132e..ba4d7464 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -476,6 +476,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { var timeNanos, durationNanos, period int64 var comments []string seenComments := map[string]bool{} + var docURL string var defaultSampleType string for _, s := range srcs { if timeNanos == 0 || s.TimeNanos < timeNanos { @@ -494,6 +495,9 @@ func combineHeaders(srcs []*Profile) 
(*Profile, error) { if defaultSampleType == "" { defaultSampleType = s.DefaultSampleType } + if docURL == "" { + docURL = s.DocURL + } } p := &Profile{ @@ -509,6 +513,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { Comments: comments, DefaultSampleType: defaultSampleType, + DocURL: docURL, } copy(p.SampleType, srcs[0].SampleType) return p, nil diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 62df80a5..f47a2439 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -39,6 +39,7 @@ type Profile struct { Location []*Location Function []*Function Comments []string + DocURL string DropFrames string KeepFrames string @@ -53,6 +54,7 @@ type Profile struct { encodeMu sync.Mutex commentX []int64 + docURLX int64 dropFramesX int64 keepFramesX int64 stringTable []string @@ -555,6 +557,9 @@ func (p *Profile) String() string { for _, c := range p.Comments { ss = append(ss, "Comment: "+c) } + if url := p.DocURL; url != "" { + ss = append(ss, fmt.Sprintf("Doc: %s", url)) + } if pt := p.PeriodType; pt != nil { ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) } @@ -844,10 +849,10 @@ func (p *Profile) HasFileLines() bool { // Unsymbolizable returns true if a mapping points to a binary for which // locations can't be symbolized in principle, at least now. Examples are -// "[vdso]", [vsyscall]" and some others, see the code. +// "[vdso]", "[vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) - return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" } // Copy makes a fully independent copy of a profile. diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 782817c3..3011efb5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,39 @@ +## 2.21.0 + + + ### Features + - add support for GINKGO_TIME_FORMAT [a69eb39] + - add GINKGO_NO_COLOR to disable colors via environment variables [bcab9c8] + + ### Fixes + - increase threshold in timeline matcher [e548367] + - Fix the document by replacing `SpecsThatWillBeRun` with `SpecsThatWillRun` + [c2c4d3c] + + ### Maintenance + - bump various dependencies [7e65a00] + +## 2.20.2 + +Require Go 1.22+ + +### Maintenance +- bump go to v1.22 [a671816] + +## 2.20.1 + +### Fixes +- make BeSpecEvent duration matcher more forgiving [d6f9640] + +## 2.20.0 + +### Features +- Add buildvcs flag [be5ab95] + +### Maintenance +- Add update-deps to makefile [d303d14] +- bump all dependencies [7a50221] + ## 2.19.1 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/v2/Makefile b/vendor/github.com/onsi/ginkgo/v2/Makefile index cb099aff..06dff97c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/Makefile +++ b/vendor/github.com/onsi/ginkgo/v2/Makefile @@ -4,8 +4,13 @@ all: vet test .PHONY: test test: - go run github.com/onsi/ginkgo/v2/ginkgo -r -p + go run github.com/onsi/ginkgo/v2/ginkgo -r -p -randomize-all -keep-going .PHONY: vet vet: go vet ./... + +.PHONY: update-deps +update-deps: + go get -u ./... 
+ go mod tidy \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go index 743555dd..4d574911 100644 --- a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -82,6 +82,10 @@ func New(colorMode ColorMode) Formatter { return fmt.Sprintf("\x1b[38;5;%dm", colorCode) } + if _, noColor := os.LookupEnv("GINKGO_NO_COLOR"); noColor { + colorMode = ColorModeNone + } + f := Formatter{ ColorMode: colorMode, colors: map[string]string{ diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 9ca408ea..8c0dfab8 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -202,6 +202,7 @@ type GoFlagsConfig struct { A bool ASMFlags string BuildMode string + BuildVCS bool Compiler string GCCGoFlags string GCFlags string @@ -327,7 +328,7 @@ var ParallelConfigFlags = GinkgoFlags{ // ReporterConfigFlags provides flags for the Ginkgo test process, and CLI var ReporterConfigFlags = GinkgoFlags{ {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", - Usage: "If set, suppress color output in default reporter."}, + Usage: "If set, suppress color output in default reporter. You can also set the environment variable GINKGO_NO_COLOR=TRUE"}, {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", Usage: "If set, emits more output including GinkgoWriter contents."}, {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", @@ -528,6 +529,8 @@ var GoBuildFlags = GinkgoFlags{ Usage: "arguments to pass on each go tool asm invocation."}, {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", Usage: "build mode to use. 
See 'go help buildmode' for more."}, + {KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build", + Usage: "adds version control information."}, {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go index aae69b04..ddcbec1b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -3,13 +3,21 @@ package types import ( "encoding/json" "fmt" + "os" "sort" "strings" "time" ) const GINKGO_FOCUS_EXIT_CODE = 197 -const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +var GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +func init() { + if os.Getenv("GINKGO_TIME_FORMAT") != "" { + GINKGO_TIME_FORMAT = os.Getenv("GINKGO_TIME_FORMAT") + } +} // Report captures information about a Ginkgo test run type Report struct { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index f2ee0501..caf3c9f5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.19.1" +const VERSION = "2.21.0" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index c6c34d65..9f6090b8 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,31 @@ +## 1.35.1 + +### Fixes +- Export EnforceDefaultTimeoutsWhenUsingContexts and DisableDefaultTimeoutsWhenUsingContext [ca36da1] + +## 1.35.0 + +### Features + +- You can now call `EnforceDefaultTimeoutsWhenUsingContexts()` to have `Eventually` honor the default timeout when passed a context. (prior to this you had to explicitly add a timeout) [e4c4265] +- You can call `StopTrying(message).Successfully()` to abort a `Consistently` early without failure [eeca931] + +### Fixes + +- Stop memoizing the result of `HaveField` to avoid unexpected errors when used with async assertions. [3bdbc4e] + +### Maintenance + +- Bump all dependencies [a05a416] + +## 1.34.2 + +Require Go 1.22+ + +### Maintenance +- bump ginkgo as well [c59c6dc] +- bump to go 1.22 - remove x/exp dependency [8158b99] + ## 1.34.1 ### Maintenance diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 2546ccce..1038d7dd 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.34.1" +const GOMEGA_VERSION = "1.35.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -319,7 +319,19 @@ you can also use Eventually().WithContext(ctx) to pass in the context. Passed-in Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17)) }, SpecTimeout(time.Second)) -Either way the context passd to Eventually is also passed to the underlying function. 
Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. +Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. + +By default, when a context is passed to Eventually *without* an explicit timeout, Gomega will rely solely on the context's cancellation to determine when to stop polling. If you want to specify a timeout in addition to the context you can do so using the .WithTimeout() method. For example: + + Eventually(client.FetchCount).WithContext(ctx).WithTimeout(10*time.Second).Should(BeNumerically(">=", 17)) + +now either the context cancellation or the timeout will cause Eventually to stop polling. + +If, instead, you would like to opt out of this behavior and have Gomega's default timeouts govern Eventuallys that take a context you can call: + + EnforceDefaultTimeoutsWhenUsingContexts() + +in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if either the context is cancelled or the default timeout elapses. **Category 3: Making assertions _in_ the function passed into Eventually** @@ -491,6 +503,16 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) { Default.SetDefaultConsistentlyPollingInterval(t) } +// EnforceDefaultTimeoutsWhenUsingContexts forces `Eventually` to apply a default timeout even when a context is provided. +func EnforceDefaultTimeoutsWhenUsingContexts() { + Default.EnforceDefaultTimeoutsWhenUsingContexts() +} + +// DisableDefaultTimeoutsWhenUsingContext disables the default timeout when a context is provided to `Eventually`. +func DisableDefaultTimeoutsWhenUsingContext() { + Default.DisableDefaultTimeoutsWhenUsingContext() +} + // AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against // the matcher passed to the Should and ShouldNot methods. 
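A hedged sketch of the context/timeout interplay documented above, assuming a Ginkgo suite; fetchCount is a stand-in for whatever function a real suite would poll:

package mypkg_test

import (
	"context"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("context-aware Eventually", func() {
	// fetchCount is a hypothetical polled function; a real suite would call a client.
	fetchCount := func(ctx context.Context) (int, error) { return 17, nil }

	It("stops on context cancellation or the explicit timeout", func(ctx SpecContext) {
		// Without WithTimeout, only cancellation of ctx would stop the polling.
		Eventually(fetchCount).WithContext(ctx).WithTimeout(10 * time.Second).
			Should(BeNumerically(">=", 17))
	}, SpecTimeout(30*time.Second))
})

To have the default Eventually timeout apply even when a context is passed, a suite can call EnforceDefaultTimeoutsWhenUsingContexts() from the DSL (or DisableDefaultTimeoutsWhenUsingContext() to switch back), per the exported wrappers above.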
// diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index cde9e2ec..8b4cd1f5 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -335,7 +335,7 @@ func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time { if assertion.asyncType == AsyncAssertionTypeConsistently { return time.After(assertion.g.DurationBundle.ConsistentlyDuration) } else { - if assertion.ctx == nil { + if assertion.ctx == nil || assertion.g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts { return time.After(assertion.g.DurationBundle.EventuallyTimeout) } else { return nil @@ -496,7 +496,15 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch for _, err := range []error{actualErr, matcherErr} { if pollingSignalErr, ok := AsPollingSignalError(err); ok { if pollingSignalErr.IsStopTrying() { - fail("Told to stop trying") + if pollingSignalErr.IsSuccessful() { + if assertion.asyncType == AsyncAssertionTypeEventually { + fail("Told to stop trying (and ignoring call to Successfully(), as it is only relevant with Consistently)") + } else { + return true // early escape hatch for Consistently + } + } else { + fail("Told to stop trying") + } return false } if pollingSignalErr.IsTryAgainAfter() { diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go index 6e0d90d3..2e026c33 100644 --- a/vendor/github.com/onsi/gomega/internal/duration_bundle.go +++ b/vendor/github.com/onsi/gomega/internal/duration_bundle.go @@ -8,10 +8,11 @@ import ( ) type DurationBundle struct { - EventuallyTimeout time.Duration - EventuallyPollingInterval time.Duration - ConsistentlyDuration time.Duration - ConsistentlyPollingInterval time.Duration + EventuallyTimeout time.Duration + EventuallyPollingInterval time.Duration + ConsistentlyDuration time.Duration + ConsistentlyPollingInterval time.Duration + EnforceDefaultTimeoutsWhenUsingContexts bool } const ( @@ -20,15 +21,19 @@ const ( ConsistentlyDurationEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_DURATION" ConsistentlyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL" + + EnforceDefaultTimeoutsWhenUsingContextsEnvVarName = "GOMEGA_ENFORCE_DEFAULT_TIMEOUTS_WHEN_USING_CONTEXTS" ) func FetchDefaultDurationBundle() DurationBundle { + _, EnforceDefaultTimeoutsWhenUsingContexts := os.LookupEnv(EnforceDefaultTimeoutsWhenUsingContextsEnvVarName) return DurationBundle{ EventuallyTimeout: durationFromEnv(EventuallyTimeoutEnvVarName, time.Second), EventuallyPollingInterval: durationFromEnv(EventuallyPollingIntervalEnvVarName, 10*time.Millisecond), - ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond), - ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond), + ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond), + ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond), + EnforceDefaultTimeoutsWhenUsingContexts: EnforceDefaultTimeoutsWhenUsingContexts, } } diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go index de1f4f33..c6e2fcc0 100644 --- a/vendor/github.com/onsi/gomega/internal/gomega.go +++ 
b/vendor/github.com/onsi/gomega/internal/gomega.go @@ -127,3 +127,11 @@ func (g *Gomega) SetDefaultConsistentlyDuration(t time.Duration) { func (g *Gomega) SetDefaultConsistentlyPollingInterval(t time.Duration) { g.DurationBundle.ConsistentlyPollingInterval = t } + +func (g *Gomega) EnforceDefaultTimeoutsWhenUsingContexts() { + g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = true +} + +func (g *Gomega) DisableDefaultTimeoutsWhenUsingContext() { + g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = false +} diff --git a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go index 83b04b1a..3a4f7ddd 100644 --- a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go +++ b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go @@ -17,6 +17,7 @@ type PollingSignalError interface { error Wrap(err error) PollingSignalError Attach(description string, obj any) PollingSignalError + Successfully() PollingSignalError Now() } @@ -45,6 +46,7 @@ type PollingSignalErrorImpl struct { wrappedErr error pollingSignalErrorType PollingSignalErrorType duration time.Duration + successful bool Attachments []PollingSignalErrorAttachment } @@ -73,6 +75,11 @@ func (s *PollingSignalErrorImpl) Unwrap() error { return s.wrappedErr } +func (s *PollingSignalErrorImpl) Successfully() PollingSignalError { + s.successful = true + return s +} + func (s *PollingSignalErrorImpl) Now() { panic(s) } @@ -81,6 +88,10 @@ func (s *PollingSignalErrorImpl) IsStopTrying() bool { return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying } +func (s *PollingSignalErrorImpl) IsSuccessful() bool { + return s.successful +} + func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool { return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter } diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go index 6989f78c..8dd3f871 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_field.go +++ b/vendor/github.com/onsi/gomega/matchers/have_field.go @@ -17,7 +17,7 @@ func (e missingFieldError) Error() string { return string(e) } -func extractField(actual interface{}, field string, matchername string) (interface{}, error) { +func extractField(actual interface{}, field string, matchername string) (any, error) { fields := strings.SplitN(field, ".", 2) actualValue := reflect.ValueOf(actual) @@ -64,36 +64,46 @@ func extractField(actual interface{}, field string, matchername string) (interfa type HaveFieldMatcher struct { Field string Expected interface{} +} - extractedField interface{} - expectedMatcher omegaMatcher +func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher { + var isMatcher bool + expectedMatcher, isMatcher := matcher.Expected.(omegaMatcher) + if !isMatcher { + expectedMatcher = &EqualMatcher{Expected: matcher.Expected} + } + return expectedMatcher } func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) { - matcher.extractedField, err = extractField(actual, matcher.Field, "HaveField") + extractedField, err := extractField(actual, matcher.Field, "HaveField") if err != nil { return false, err } - var isMatcher bool - matcher.expectedMatcher, isMatcher = matcher.Expected.(omegaMatcher) - if !isMatcher { - matcher.expectedMatcher = &EqualMatcher{Expected: matcher.Expected} - } - - return matcher.expectedMatcher.Match(matcher.extractedField) 
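The Successfully() hook threaded through polling_signal_error.go above lets a Consistently end early without failing; a minimal hedged sketch (the counter and threshold are invented for illustration):

package mypkg_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestConsistentlyCanExitEarly(t *testing.T) {
	g := NewWithT(t)
	count := 0
	// Consistently normally polls for its full duration; StopTrying(...).Successfully()
	// is the new escape hatch that aborts it early while still counting as a pass.
	g.Consistently(func() int {
		count++
		if count > 3 {
			StopTrying("saw enough samples").Successfully().Now()
		}
		return count
	}).Should(BeNumerically("<", 100))
}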
+ return matcher.expectedMatcher().Match(extractedField) } func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) { + extractedField, err := extractField(actual, matcher.Field, "HaveField") + if err != nil { + // this really shouldn't happen + return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err) + } message = fmt.Sprintf("Value for field '%s' failed to satisfy matcher.\n", matcher.Field) - message += matcher.expectedMatcher.FailureMessage(matcher.extractedField) + message += matcher.expectedMatcher().FailureMessage(extractedField) return message } func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) { + extractedField, err := extractField(actual, matcher.Field, "HaveField") + if err != nil { + // this really shouldn't happen + return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err) + } message = fmt.Sprintf("Value for field '%s' satisfied matcher, but should not have.\n", matcher.Field) - message += matcher.expectedMatcher.NegatedFailureMessage(matcher.extractedField) + message += matcher.expectedMatcher().NegatedFailureMessage(extractedField) return message } diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go index 4339acc6..44aa61d4 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -1,7 +1,7 @@ package bipartitegraph import ( - "golang.org/x/exp/slices" + "slices" . "github.com/onsi/gomega/matchers/support/goraph/edge" . "github.com/onsi/gomega/matchers/support/goraph/node" diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go index 7c7adb94..30f2beed 100644 --- a/vendor/github.com/onsi/gomega/types/types.go +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -29,6 +29,8 @@ type Gomega interface { SetDefaultEventuallyPollingInterval(time.Duration) SetDefaultConsistentlyDuration(time.Duration) SetDefaultConsistentlyPollingInterval(time.Duration) + EnforceDefaultTimeoutsWhenUsingContexts() + DisableDefaultTimeoutsWhenUsingContext() } // All Gomega matchers must implement the GomegaMatcher interface diff --git a/vendor/github.com/openshift/api/config/v1/Makefile b/vendor/github.com/openshift/api/config/v1/Makefile deleted file mode 100644 index 66bf6363..00000000 --- a/vendor/github.com/openshift/api/config/v1/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -.PHONY: test -test: - make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="config.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/config/v1/doc.go b/vendor/github.com/openshift/api/config/v1/doc.go deleted file mode 100644 index f9945475..00000000 --- a/vendor/github.com/openshift/api/config/v1/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// +k8s:deepcopy-gen=package,register -// +k8s:defaulter-gen=TypeMeta -// +k8s:openapi-gen=true -// +openshift:featuregated-schema-gen=true - -// +kubebuilder:validation:Optional -// +groupName=config.openshift.io -// Package v1 is the v1 version of the API. 
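The HaveField rework above recomputes the extracted field on each call instead of memoizing it in the matcher, which is what makes it safe inside async assertions; day-to-day usage is unchanged. A small hedged sketch (the Book type is invented):

package mypkg_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

type Book struct {
	Title  string
	Author struct{ Name string }
}

func TestHaveField(t *testing.T) {
	g := NewWithT(t)
	b := Book{Title: "Les Miserables"}
	b.Author.Name = "Victor Hugo"

	// A bare expected value is wrapped in Equal; nested fields use dot notation,
	// and any GomegaMatcher can be supplied in place of a value.
	g.Expect(b).To(HaveField("Title", "Les Miserables"))
	g.Expect(b).To(HaveField("Author.Name", ContainSubstring("Hugo")))
}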
-package v1 diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go deleted file mode 100644 index 61302592..00000000 --- a/vendor/github.com/openshift/api/config/v1/register.go +++ /dev/null @@ -1,78 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - GroupName = "config.openshift.io" - GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // Install is a function which adds this version to a scheme - Install = schemeBuilder.AddToScheme - - // SchemeGroupVersion generated code relies on this name - // Deprecated - SchemeGroupVersion = GroupVersion - // AddToScheme exists solely to keep the old generators creating valid code - // DEPRECATED - AddToScheme = schemeBuilder.AddToScheme -) - -// Resource generated code relies on this being here, but it logically belongs to the group -// DEPRECATED -func Resource(resource string) schema.GroupResource { - return schema.GroupResource{Group: GroupName, Resource: resource} -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(GroupVersion, - &APIServer{}, - &APIServerList{}, - &Authentication{}, - &AuthenticationList{}, - &Build{}, - &BuildList{}, - &ClusterOperator{}, - &ClusterOperatorList{}, - &ClusterVersion{}, - &ClusterVersionList{}, - &Console{}, - &ConsoleList{}, - &DNS{}, - &DNSList{}, - &FeatureGate{}, - &FeatureGateList{}, - &Image{}, - &ImageList{}, - &Infrastructure{}, - &InfrastructureList{}, - &Ingress{}, - &IngressList{}, - &Node{}, - &NodeList{}, - &Network{}, - &NetworkList{}, - &OAuth{}, - &OAuthList{}, - &OperatorHub{}, - &OperatorHubList{}, - &Project{}, - &ProjectList{}, - &Proxy{}, - &ProxyList{}, - &Scheduler{}, - &SchedulerList{}, - &ImageContentPolicy{}, - &ImageContentPolicyList{}, - &ImageDigestMirrorSet{}, - &ImageDigestMirrorSetList{}, - &ImageTagMirrorSet{}, - &ImageTagMirrorSetList{}, - ) - metav1.AddToGroupVersion(scheme, GroupVersion) - return nil -} diff --git a/vendor/github.com/openshift/api/config/v1/stringsource.go b/vendor/github.com/openshift/api/config/v1/stringsource.go deleted file mode 100644 index 6a5718c1..00000000 --- a/vendor/github.com/openshift/api/config/v1/stringsource.go +++ /dev/null @@ -1,31 +0,0 @@ -package v1 - -import "encoding/json" - -// UnmarshalJSON implements the json.Unmarshaller interface. -// If the value is a string, it sets the Value field of the StringSource. -// Otherwise, it is unmarshaled into the StringSourceSpec struct -func (s *StringSource) UnmarshalJSON(value []byte) error { - // If we can unmarshal to a simple string, just set the value - var simpleValue string - if err := json.Unmarshal(value, &simpleValue); err == nil { - s.Value = simpleValue - return nil - } - - // Otherwise do the full struct unmarshal - return json.Unmarshal(value, &s.StringSourceSpec) -} - -// MarshalJSON implements the json.Marshaller interface. -// If the StringSource contains only a string Value (or is empty), it is marshaled as a JSON string. -// Otherwise, the StringSourceSpec struct is marshaled as a JSON object. 
-func (s *StringSource) MarshalJSON() ([]byte, error) { - // If we have only a cleartext value set, do a simple string marshal - if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) { - return json.Marshal(s.Value) - } - - // Otherwise do the full struct marshal of the externalized bits - return json.Marshal(s.StringSourceSpec) -} diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go deleted file mode 100644 index d4d09e7f..00000000 --- a/vendor/github.com/openshift/api/config/v1/types.go +++ /dev/null @@ -1,434 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// ConfigMapFileReference references a config map in a specific namespace. -// The namespace must be specified at the point of use. -type ConfigMapFileReference struct { - Name string `json:"name"` - // Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. - Key string `json:"key,omitempty"` -} - -// ConfigMapNameReference references a config map in a specific namespace. -// The namespace must be specified at the point of use. -type ConfigMapNameReference struct { - // name is the metadata.name of the referenced config map - // +kubebuilder:validation:Required - // +required - Name string `json:"name"` -} - -// SecretNameReference references a secret in a specific namespace. -// The namespace must be specified at the point of use. -type SecretNameReference struct { - // name is the metadata.name of the referenced secret - // +kubebuilder:validation:Required - // +required - Name string `json:"name"` -} - -// HTTPServingInfo holds configuration for serving HTTP -type HTTPServingInfo struct { - // ServingInfo is the HTTP serving information - ServingInfo `json:",inline"` - // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. - MaxRequestsInFlight int64 `json:"maxRequestsInFlight"` - // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if - // -1 there is no limit on requests. - RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"` -} - -// ServingInfo holds information about serving web pages -type ServingInfo struct { - // BindAddress is the ip:port to serve on - BindAddress string `json:"bindAddress"` - // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", - // "tcp4", and "tcp6" - BindNetwork string `json:"bindNetwork"` - // CertInfo is the TLS cert info for serving secure traffic. - // this is anonymous so that we can inline it for serialization - CertInfo `json:",inline"` - // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates - // +optional - ClientCA string `json:"clientCA,omitempty"` - // NamedCertificates is a list of certificates to use to secure requests to specific hostnames - NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"` - // MinTLSVersion is the minimum TLS version supported. - // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants - MinTLSVersion string `json:"minTLSVersion,omitempty"` - // CipherSuites contains an overridden list of ciphers for the server to support. 
- // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants - CipherSuites []string `json:"cipherSuites,omitempty"` -} - -// CertInfo relates a certificate with a private key -type CertInfo struct { - // CertFile is a file containing a PEM-encoded certificate - CertFile string `json:"certFile"` - // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile - KeyFile string `json:"keyFile"` -} - -// NamedCertificate specifies a certificate/key, and the names it should be served for -type NamedCertificate struct { - // Names is a list of DNS names this certificate should be used to secure - // A name can be a normal DNS name, or can contain leading wildcard segments. - Names []string `json:"names,omitempty"` - // CertInfo is the TLS cert info for serving secure traffic - CertInfo `json:",inline"` -} - -// LeaderElection provides information to elect a leader -type LeaderElection struct { - // disable allows leader election to be suspended while allowing a fully defaulted "normal" startup case. - Disable bool `json:"disable,omitempty"` - // namespace indicates which namespace the resource is in - Namespace string `json:"namespace,omitempty"` - // name indicates what name to use for the resource - Name string `json:"name,omitempty"` - - // leaseDuration is the duration that non-leader candidates will wait - // after observing a leadership renewal until attempting to acquire - // leadership of a led but unrenewed leader slot. This is effectively the - // maximum duration that a leader can be stopped before it is replaced - // by another candidate. This is only applicable if leader election is - // enabled. - // +nullable - LeaseDuration metav1.Duration `json:"leaseDuration"` - // renewDeadline is the interval between attempts by the acting master to - // renew a leadership slot before it stops leading. This must be less - // than or equal to the lease duration. This is only applicable if leader - // election is enabled. - // +nullable - RenewDeadline metav1.Duration `json:"renewDeadline"` - // retryPeriod is the duration the clients should wait between attempting - // acquisition and renewal of a leadership. This is only applicable if - // leader election is enabled. - // +nullable - RetryPeriod metav1.Duration `json:"retryPeriod"` -} - -// StringSource allows specifying a string inline, or externally via env var or file. -// When it contains only a string value, it marshals to a simple JSON string. -type StringSource struct { - // StringSourceSpec specifies the string value, or external location - StringSourceSpec `json:",inline"` -} - -// StringSourceSpec specifies a string value, or external location -type StringSourceSpec struct { - // Value specifies the cleartext value, or an encrypted value if keyFile is specified. - Value string `json:"value"` - - // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. - Env string `json:"env"` - - // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified. - File string `json:"file"` - - // KeyFile references a file containing the key to use to decrypt the value. 
- KeyFile string `json:"keyFile"` -} - -// RemoteConnectionInfo holds information necessary for establishing a remote connection -type RemoteConnectionInfo struct { - // URL is the remote URL to connect to - URL string `json:"url"` - // CA is the CA for verifying TLS connections - CA string `json:"ca"` - // CertInfo is the TLS client cert information to present - // this is anonymous so that we can inline it for serialization - CertInfo `json:",inline"` -} - -type AdmissionConfig struct { - PluginConfig map[string]AdmissionPluginConfig `json:"pluginConfig,omitempty"` - - // enabledPlugins is a list of admission plugins that must be on in addition to the default list. - // Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon - // and can result in performance penalties and unexpected behavior. - EnabledAdmissionPlugins []string `json:"enabledPlugins,omitempty"` - - // disabledPlugins is a list of admission plugins that must be off. Putting something in this list - // is almost always a mistake and likely to result in cluster instability. - DisabledAdmissionPlugins []string `json:"disabledPlugins,omitempty"` -} - -// AdmissionPluginConfig holds the necessary configuration options for admission plugins -type AdmissionPluginConfig struct { - // Location is the path to a configuration file that contains the plugin's - // configuration - Location string `json:"location"` - - // Configuration is an embedded configuration object to be used as the plugin's - // configuration. If present, it will be used instead of the path to the configuration file. - // +nullable - // +kubebuilder:pruning:PreserveUnknownFields - Configuration runtime.RawExtension `json:"configuration"` -} - -type LogFormatType string - -type WebHookModeType string - -const ( - // LogFormatLegacy saves event in 1-line text format. - LogFormatLegacy LogFormatType = "legacy" - // LogFormatJson saves event in structured json format. - LogFormatJson LogFormatType = "json" - - // WebHookModeBatch indicates that the webhook should buffer audit events - // internally, sending batch updates either once a certain number of - // events have been received or a certain amount of time has passed. - WebHookModeBatch WebHookModeType = "batch" - // WebHookModeBlocking causes the webhook to block on every attempt to process - // a set of events. This causes requests to the API server to wait for a - // round trip to the external audit service before sending a response. - WebHookModeBlocking WebHookModeType = "blocking" -) - -// AuditConfig holds configuration for the audit capabilities -type AuditConfig struct { - // If this flag is set, audit log will be printed in the logs. - // The logs contains, method, user and a requested URL. - Enabled bool `json:"enabled"` - // All requests coming to the apiserver will be logged to this file. - AuditFilePath string `json:"auditFilePath"` - // Maximum number of days to retain old log files based on the timestamp encoded in their filename. - MaximumFileRetentionDays int32 `json:"maximumFileRetentionDays"` - // Maximum number of old log files to retain. - MaximumRetainedFiles int32 `json:"maximumRetainedFiles"` - // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB. - MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"` - - // PolicyFile is a path to the file that defines the audit policy configuration. 
- PolicyFile string `json:"policyFile"` - // PolicyConfiguration is an embedded policy configuration object to be used - // as the audit policy configuration. If present, it will be used instead of - // the path to the policy file. - // +nullable - // +kubebuilder:pruning:PreserveUnknownFields - PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"` - - // Format of saved audits (legacy or json). - LogFormat LogFormatType `json:"logFormat"` - - // Path to a .kubeconfig formatted file that defines the audit webhook configuration. - WebHookKubeConfig string `json:"webHookKubeConfig"` - // Strategy for sending audit events (block or batch). - WebHookMode WebHookModeType `json:"webHookMode"` -} - -// EtcdConnectionInfo holds information necessary for connecting to an etcd server -type EtcdConnectionInfo struct { - // URLs are the URLs for etcd - URLs []string `json:"urls,omitempty"` - // CA is a file containing trusted roots for the etcd server certificates - CA string `json:"ca"` - // CertInfo is the TLS client cert information for securing communication to etcd - // this is anonymous so that we can inline it for serialization - CertInfo `json:",inline"` -} - -type EtcdStorageConfig struct { - EtcdConnectionInfo `json:",inline"` - - // StoragePrefix is the path within etcd that the OpenShift resources will - // be rooted under. This value, if changed, will mean existing objects in etcd will - // no longer be located. - StoragePrefix string `json:"storagePrefix"` -} - -// GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd -type GenericAPIServerConfig struct { - // servingInfo describes how to start serving - ServingInfo HTTPServingInfo `json:"servingInfo"` - - // corsAllowedOrigins - CORSAllowedOrigins []string `json:"corsAllowedOrigins"` - - // auditConfig describes how to configure audit information - AuditConfig AuditConfig `json:"auditConfig"` - - // storageConfig contains information about how to use - StorageConfig EtcdStorageConfig `json:"storageConfig"` - - // admissionConfig holds information about how to configure admission. - AdmissionConfig AdmissionConfig `json:"admission"` - - KubeClientConfig KubeClientConfig `json:"kubeClientConfig"` -} - -type KubeClientConfig struct { - // kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config - KubeConfig string `json:"kubeConfig"` - - // connectionOverrides specifies client overrides for system components to loop back to this master. - ConnectionOverrides ClientConnectionOverrides `json:"connectionOverrides"` -} - -type ClientConnectionOverrides struct { - // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the - // default value of 'application/json'. This field will control all connections to the server used by a particular - // client. - AcceptContentTypes string `json:"acceptContentTypes"` - // contentType is the content type used when sending data to the server from this client. - ContentType string `json:"contentType"` - - // qps controls the number of queries per second allowed for this connection. - QPS float32 `json:"qps"` - // burst allows extra queries to accumulate when a client is exceeding its rate. 
- Burst int32 `json:"burst"` -} - -// GenericControllerConfig provides information to configure a controller -type GenericControllerConfig struct { - // ServingInfo is the HTTP serving information for the controller's endpoints - ServingInfo HTTPServingInfo `json:"servingInfo"` - - // leaderElection provides information to elect a leader. Only override this if you have a specific need - LeaderElection LeaderElection `json:"leaderElection"` - - // authentication allows configuration of authentication for the endpoints - Authentication DelegatedAuthentication `json:"authentication"` - // authorization allows configuration of authentication for the endpoints - Authorization DelegatedAuthorization `json:"authorization"` -} - -// DelegatedAuthentication allows authentication to be disabled. -type DelegatedAuthentication struct { - // disabled indicates that authentication should be disabled. By default it will use delegated authentication. - Disabled bool `json:"disabled,omitempty"` -} - -// DelegatedAuthorization allows authorization to be disabled. -type DelegatedAuthorization struct { - // disabled indicates that authorization should be disabled. By default it will use delegated authorization. - Disabled bool `json:"disabled,omitempty"` -} -type RequiredHSTSPolicy struct { - // namespaceSelector specifies a label selector such that the policy applies only to those routes that - // are in namespaces with labels that match the selector, and are in one of the DomainPatterns. - // Defaults to the empty LabelSelector, which matches everything. - // +optional - NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` - - // domainPatterns is a list of domains for which the desired HSTS annotations are required. - // If domainPatterns is specified and a route is created with a spec.host matching one of the domains, - // the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy. - // - // The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. - // foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*. - // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:Required - // +required - DomainPatterns []string `json:"domainPatterns"` - - // maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. - // If set to 0, it negates the effect, and hosts are removed as HSTS hosts. - // If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. - // maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS - // policy will eventually expire on that client. - MaxAge MaxAgePolicy `json:"maxAge"` - - // preloadPolicy directs the client to include hosts in its host preload list so that - // it never needs to do an initial load to get the HSTS header (note that this is not defined - // in RFC 6797 and is therefore client implementation-dependent). - // +optional - PreloadPolicy PreloadPolicy `json:"preloadPolicy,omitempty"` - - // includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's - // domain name. 
Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: - // - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com - // - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com - // - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com - // - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com - // +optional - IncludeSubDomainsPolicy IncludeSubDomainsPolicy `json:"includeSubDomainsPolicy,omitempty"` -} - -// MaxAgePolicy contains a numeric range for specifying a compliant HSTS max-age for the enclosing RequiredHSTSPolicy -type MaxAgePolicy struct { - // The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age - // This value can be left unspecified, in which case no upper limit is enforced. - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=2147483647 - LargestMaxAge *int32 `json:"largestMaxAge,omitempty"` - - // The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age - // Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary - // tool for administrators to quickly correct mistakes. - // This value can be left unspecified, in which case no lower limit is enforced. - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=2147483647 - SmallestMaxAge *int32 `json:"smallestMaxAge,omitempty"` -} - -// PreloadPolicy contains a value for specifying a compliant HSTS preload policy for the enclosing RequiredHSTSPolicy -// +kubebuilder:validation:Enum=RequirePreload;RequireNoPreload;NoOpinion -type PreloadPolicy string - -const ( - // RequirePreloadPolicy means HSTS "preload" is required by the RequiredHSTSPolicy - RequirePreloadPolicy PreloadPolicy = "RequirePreload" - - // RequireNoPreloadPolicy means HSTS "preload" is forbidden by the RequiredHSTSPolicy - RequireNoPreloadPolicy PreloadPolicy = "RequireNoPreload" - - // NoOpinionPreloadPolicy means HSTS "preload" doesn't matter to the RequiredHSTSPolicy - NoOpinionPreloadPolicy PreloadPolicy = "NoOpinion" -) - -// IncludeSubDomainsPolicy contains a value for specifying a compliant HSTS includeSubdomains policy -// for the enclosing RequiredHSTSPolicy -// +kubebuilder:validation:Enum=RequireIncludeSubDomains;RequireNoIncludeSubDomains;NoOpinion -type IncludeSubDomainsPolicy string - -const ( - // RequireIncludeSubDomains means HSTS "includeSubDomains" is required by the RequiredHSTSPolicy - RequireIncludeSubDomains IncludeSubDomainsPolicy = "RequireIncludeSubDomains" - - // RequireNoIncludeSubDomains means HSTS "includeSubDomains" is forbidden by the RequiredHSTSPolicy - RequireNoIncludeSubDomains IncludeSubDomainsPolicy = "RequireNoIncludeSubDomains" - - // NoOpinionIncludeSubDomains means HSTS "includeSubDomains" doesn't matter to the RequiredHSTSPolicy - NoOpinionIncludeSubDomains IncludeSubDomainsPolicy = "NoOpinion" -) - -// IBMCloudServiceName contains a value specifying the name of an IBM Cloud Service, -// which are used by MAPI, CIRO, CIO, Installer, etc. -// +kubebuilder:validation:Enum=CIS;COS;COSConfig;DNSServices;GlobalCatalog;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;ResourceController;ResourceManager;VPC -type IBMCloudServiceName string - -const ( - // IBMCloudServiceCIS is the name for IBM Cloud CIS. - IBMCloudServiceCIS IBMCloudServiceName = "CIS" - // IBMCloudServiceCOS is the name for IBM Cloud COS. 
- IBMCloudServiceCOS IBMCloudServiceName = "COS" - // IBMCloudServiceCOSConfig is the name for IBM Cloud COS Config service. - IBMCloudServiceCOSConfig IBMCloudServiceName = "COSConfig" - // IBMCloudServiceDNSServices is the name for IBM Cloud DNS Services. - IBMCloudServiceDNSServices IBMCloudServiceName = "DNSServices" - // IBMCloudServiceGlobalCatalog is the name for IBM Cloud Global Catalog service. - IBMCloudServiceGlobalCatalog IBMCloudServiceName = "GlobalCatalog" - // IBMCloudServiceGlobalSearch is the name for IBM Cloud Global Search. - IBMCloudServiceGlobalSearch IBMCloudServiceName = "GlobalSearch" - // IBMCloudServiceGlobalTagging is the name for IBM Cloud Global Tagging. - IBMCloudServiceGlobalTagging IBMCloudServiceName = "GlobalTagging" - // IBMCloudServiceHyperProtect is the name for IBM Cloud Hyper Protect. - IBMCloudServiceHyperProtect IBMCloudServiceName = "HyperProtect" - // IBMCloudServiceIAM is the name for IBM Cloud IAM. - IBMCloudServiceIAM IBMCloudServiceName = "IAM" - // IBMCloudServiceKeyProtect is the name for IBM Cloud Key Protect. - IBMCloudServiceKeyProtect IBMCloudServiceName = "KeyProtect" - // IBMCloudServiceResourceController is the name for IBM Cloud Resource Controller. - IBMCloudServiceResourceController IBMCloudServiceName = "ResourceController" - // IBMCloudServiceResourceManager is the name for IBM Cloud Resource Manager. - IBMCloudServiceResourceManager IBMCloudServiceName = "ResourceManager" - // IBMCloudServiceVPC is the name for IBM Cloud VPC. - IBMCloudServiceVPC IBMCloudServiceName = "VPC" -) diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go deleted file mode 100644 index d815556d..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ /dev/null @@ -1,227 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// APIServer holds configuration (like serving certificates, client CA and CORS domains) -// shared by all API servers in the system, among them especially kube-apiserver -// and openshift-apiserver. The canonical name of an instance is 'cluster'. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=apiservers,scope=Cluster -// +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type APIServer struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec APIServerSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status APIServerStatus `json:"status"` -} - -type APIServerSpec struct { - // servingCert is the TLS cert info for serving secure traffic. 
If not specified, operator managed certificates - // will be used for serving secure traffic. - // +optional - ServingCerts APIServerServingCerts `json:"servingCerts"` - // clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for - // incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. - // You usually only have to set this if you have your own PKI you wish to honor client certificates from. - // The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - // - ConfigMap.Data["ca-bundle.crt"] - CA bundle. - // +optional - ClientCA ConfigMapNameReference `json:"clientCA"` - // additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the - // API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth - // server from JavaScript applications. - // The values are regular expressions that correspond to the Golang regular expression language. - // +optional - AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"` - // encryption allows the configuration of encryption of resources at the datastore layer. - // +optional - Encryption APIServerEncryption `json:"encryption"` - // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. - // - // If unset, a default (which may change between releases) is chosen. Note that only Old, - // Intermediate and Custom profiles are currently supported, and the maximum available - // minTLSVersion is VersionTLS12. - // +optional - TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` - // audit specifies the settings for audit configuration to be applied to all OpenShift-provided - // API servers in the cluster. - // +optional - // +kubebuilder:default={profile: Default} - Audit Audit `json:"audit"` -} - -// AuditProfileType defines the audit policy profile type. -// +kubebuilder:validation:Enum=Default;WriteRequestBodies;AllRequestBodies;None -type AuditProfileType string - -const ( - // "None" disables audit logs. - NoneAuditProfileType AuditProfileType = "None" - - // "Default" is the existing default audit configuration policy. - DefaultAuditProfileType AuditProfileType = "Default" - - // "WriteRequestBodies" is similar to Default but it logs request and response - // HTTP payloads for write requests (create, update, patch) - WriteRequestBodiesAuditProfileType AuditProfileType = "WriteRequestBodies" - - // "AllRequestBodies" is similar to WriteRequestBodies, but also logs request - // and response HTTP payloads for read requests (get, list). - AllRequestBodiesAuditProfileType AuditProfileType = "AllRequestBodies" -) - -type Audit struct { - // profile specifies the name of the desired top-level audit profile to be applied to all requests - // sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, - // openshift-apiserver and oauth-apiserver), with the exception of those requests that match - // one or more of the customRules. - // - // The following profiles are provided: - // - Default: default policy which means MetaData level logging with the exception of events - // (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody - // level). 
- // - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for - // write requests (create, update, patch). - // - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response - // HTTP payloads for read requests (get, list). - // - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. - // - // Warning: It is not recommended to disable audit logging by using the `None` profile unless you - // are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. - // If you disable audit logging and a support situation arises, you might need to enable audit logging - // and reproduce the issue in order to troubleshoot properly. - // - // If unset, the 'Default' profile is used as the default. - // - // +kubebuilder:default=Default - Profile AuditProfileType `json:"profile,omitempty"` - // customRules specify profiles per group. These profile take precedence over the - // top-level profile field if they apply. They are evaluation from top to bottom and - // the first one that matches, applies. - // +listType=map - // +listMapKey=group - // +optional - CustomRules []AuditCustomRule `json:"customRules,omitempty"` -} - -// AuditCustomRule describes a custom rule for an audit profile that takes precedence over -// the top-level profile. -type AuditCustomRule struct { - // group is a name of group a request user must be member of in order to this profile to apply. - // - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +required - Group string `json:"group"` - // profile specifies the name of the desired audit policy configuration to be deployed to - // all OpenShift-provided API servers in the cluster. - // - // The following profiles are provided: - // - Default: the existing default policy. - // - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for - // write requests (create, update, patch). - // - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response - // HTTP payloads for read requests (get, list). - // - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. - // - // If unset, the 'Default' profile is used as the default. - // - // +kubebuilder:validation:Required - // +required - Profile AuditProfileType `json:"profile,omitempty"` -} - -type APIServerServingCerts struct { - // namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. - // If no named certificates are provided, or no named certificates match the server name as understood by a client, - // the defaultServingCertificate will be used. - // +optional - NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"` -} - -// APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate. -type APIServerNamedServingCert struct { - // names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to - // serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. - // Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names. - // +optional - Names []string `json:"names,omitempty"` - // servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. 
- // The secret must exist in the openshift-config namespace and contain the following required fields: - // - Secret.Data["tls.key"] - TLS private key. - // - Secret.Data["tls.crt"] - TLS certificate. - ServingCertificate SecretNameReference `json:"servingCertificate"` -} - -type APIServerEncryption struct { - // type defines what encryption type should be used to encrypt resources at the datastore layer. - // When this field is unset (i.e. when it is set to the empty string), identity is implied. - // The behavior of unset can and will change over time. Even if encryption is enabled by default, - // the meaning of unset may change to a different encryption type based on changes in best practices. - // - // When encryption is enabled, all sensitive resources shipped with the platform are encrypted. - // This list of sensitive resources can and will change over time. The current authoritative list is: - // - // 1. secrets - // 2. configmaps - // 3. routes.route.openshift.io - // 4. oauthaccesstokens.oauth.openshift.io - // 5. oauthauthorizetokens.oauth.openshift.io - // - // +unionDiscriminator - // +optional - Type EncryptionType `json:"type,omitempty"` -} - -// +kubebuilder:validation:Enum="";identity;aescbc;aesgcm -type EncryptionType string - -const ( - // identity refers to a type where no encryption is performed at the datastore layer. - // Resources are written as-is without encryption. - EncryptionTypeIdentity EncryptionType = "identity" - - // aescbc refers to a type where AES-CBC with PKCS#7 padding and a 32-byte key - // is used to perform encryption at the datastore layer. - EncryptionTypeAESCBC EncryptionType = "aescbc" - - // aesgcm refers to a type where AES-GCM with random nonce and a 32-byte key - // is used to perform encryption at the datastore layer. - EncryptionTypeAESGCM EncryptionType = "aesgcm" -) - -type APIServerStatus struct { -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type APIServerList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - Items []APIServer `json:"items"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go deleted file mode 100644 index f6f0c12a..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ /dev/null @@ -1,483 +0,0 @@ -package v1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" - -// Authentication specifies cluster-wide settings for authentication (like OAuth and -// webhook token authenticators). The canonical name of an instance is `cluster`. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=authentications,scope=Cluster -// +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type Authentication struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec AuthenticationSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status AuthenticationStatus `json:"status"` -} - -type AuthenticationSpec struct { - // type identifies the cluster managed, user facing authentication mode in use. - // Specifically, it manages the component that responds to login attempts. - // The default is IntegratedOAuth. - // +optional - Type AuthenticationType `json:"type"` - - // oauthMetadata contains the discovery endpoint data for OAuth 2.0 - // Authorization Server Metadata for an external OAuth server. 
- // This discovery document can be viewed from its served location: - // oc get --raw '/.well-known/oauth-authorization-server' - // For further details, see the IETF Draft: - // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - // If oauthMetadata.name is non-empty, this value has precedence - // over any metadata reference stored in status. - // The key "oauthMetadata" is used to locate the data. - // If specified and the config map or expected key is not found, no metadata is served. - // If the specified metadata is not valid, no metadata is served. - // The namespace for this config map is openshift-config. - // +optional - OAuthMetadata ConfigMapNameReference `json:"oauthMetadata"` - - // webhookTokenAuthenticators is DEPRECATED, setting it has no effect. - // +listType=atomic - WebhookTokenAuthenticators []DeprecatedWebhookTokenAuthenticator `json:"webhookTokenAuthenticators,omitempty"` - - // webhookTokenAuthenticator configures a remote token reviewer. - // These remote authentication webhooks can be used to verify bearer tokens - // via the tokenreviews.authentication.k8s.io REST API. This is required to - // honor bearer tokens that are provisioned by an external authentication service. - // - // Can only be set if "Type" is set to "None". - // - // +optional - WebhookTokenAuthenticator *WebhookTokenAuthenticator `json:"webhookTokenAuthenticator,omitempty"` - - // serviceAccountIssuer is the identifier of the bound service account token - // issuer. - // The default is https://kubernetes.default.svc - // WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the - // previous issuer value. Instead, the tokens issued by the previous service account issuer will continue to - // be trusted for a time period chosen by the platform (currently set to 24h). - // This time period is subject to change over time. - // This allows internal components to transition to use the new service account issuer without service disruption. - // +optional - ServiceAccountIssuer string `json:"serviceAccountIssuer"` - - // OIDCProviders are OIDC identity providers that can issue tokens - // for this cluster. - // Can only be set if "Type" is set to "OIDC". - // - // At most one provider can be configured. - // - // +listType=map - // +listMapKey=name - // +kubebuilder:validation:MaxItems=1 - // +openshift:enable:FeatureGate=ExternalOIDC - OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"` -} - -type AuthenticationStatus struct { - // integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 - // Authorization Server Metadata for the in-cluster integrated OAuth server. - // This discovery document can be viewed from its served location: - // oc get --raw '/.well-known/oauth-authorization-server' - // For further details, see the IETF Draft: - // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - // This contains the observed value based on cluster state. - // An explicitly set value in spec.oauthMetadata has precedence over this field. - // This field has no meaning if authentication spec.type is not set to IntegratedOAuth. - // The key "oauthMetadata" is used to locate the data. - // If the config map or expected key is not found, no metadata is served. - // If the specified metadata is not valid, no metadata is served. - // The namespace for this config map is openshift-config-managed.
- IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"` - - // OIDCClients is where participating operators place the current OIDC client status - // for OIDC clients that can be customized by the cluster-admin. - // - // +listType=map - // +listMapKey=componentNamespace - // +listMapKey=componentName - // +kubebuilder:validation:MaxItems=20 - // +openshift:enable:FeatureGate=ExternalOIDC - OIDCClients []OIDCClientStatus `json:"oidcClients"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type AuthenticationList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []Authentication `json:"items"` -} - -// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";None;IntegratedOAuth -// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC,enum="";None;IntegratedOAuth;OIDC -type AuthenticationType string - -const ( - // None means that no cluster managed authentication system is in place. - // Note that user login will only work if a manually configured system is in place and - // referenced in authentication spec via oauthMetadata and - // webhookTokenAuthenticator/oidcProviders - AuthenticationTypeNone AuthenticationType = "None" - - // IntegratedOAuth refers to the cluster managed OAuth server. - // It is configured via the top level OAuth config. - AuthenticationTypeIntegratedOAuth AuthenticationType = "IntegratedOAuth" - - // AuthenticationTypeOIDC refers to a configuration with an external - // OIDC server configured directly with the kube-apiserver. - AuthenticationTypeOIDC AuthenticationType = "OIDC" -) - -// deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. -// It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. -type DeprecatedWebhookTokenAuthenticator struct { - // kubeConfig contains kube config file data which describes how to access the remote webhook service. - // For further details, see: - // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - // The key "kubeConfig" is used to locate the data. - // If the secret or expected key is not found, the webhook is not honored. - // If the specified kube config data is not valid, the webhook is not honored. - // The namespace for this secret is determined by the point of use. - KubeConfig SecretNameReference `json:"kubeConfig"` -} - -// webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator -type WebhookTokenAuthenticator struct { - // kubeConfig references a secret that contains kube config file data which - // describes how to access the remote webhook service. - // The namespace for the referenced secret is openshift-config. - // - // For further details, see: - // - // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - // - // The key "kubeConfig" is used to locate the data. - // If the secret or expected key is not found, the webhook is not honored. - // If the specified kube config data is not valid, the webhook is not honored. 
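A hedged sketch of the Type/webhookTokenAuthenticator pairing described above; the secret name is illustrative and the configv1 alias assumes the upstream module path.

package sketch

import configv1 "github.com/openshift/api/config/v1"

// Type None turns off the cluster-managed login flow; bearer tokens are
// then honored only via the remote TokenReview webhook referenced by this
// secret, which must live in openshift-config. The name is illustrative.
var authSpec = configv1.AuthenticationSpec{
	Type: configv1.AuthenticationTypeNone,
	WebhookTokenAuthenticator: &configv1.WebhookTokenAuthenticator{
		KubeConfig: configv1.SecretNameReference{Name: "my-webhook-kubeconfig"},
	},
}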
- // +kubebuilder:validation:Required - // +required - KubeConfig SecretNameReference `json:"kubeConfig"` -} - -const ( - // OAuthMetadataKey is the key for the oauth authorization server metadata - OAuthMetadataKey = "oauthMetadata" - - // KubeConfigKey is the key for the kube config file data in a secret - KubeConfigKey = "kubeConfig" -) - -type OIDCProvider struct { - // Name of the OIDC provider - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required - // +required - Name string `json:"name"` - // Issuer describes attributes of the OIDC token issuer - // - // +kubebuilder:validation:Required - // +required - Issuer TokenIssuer `json:"issuer"` - - // OIDCClients contains configuration for the platform's clients that - // need to request tokens from the issuer - // - // +listType=map - // +listMapKey=componentNamespace - // +listMapKey=componentName - // +kubebuilder:validation:MaxItems=20 - OIDCClients []OIDCClientConfig `json:"oidcClients"` - - // ClaimMappings describes rules on how to transform information from an - // ID token into a cluster identity - ClaimMappings TokenClaimMappings `json:"claimMappings"` - - // ClaimValidationRules are rules that are applied to validate token claims to authenticate users. - // - // +listType=atomic - ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"` -} - -// +kubebuilder:validation:MinLength=1 -type TokenAudience string - -type TokenIssuer struct { - // URL is the serving URL of the token issuer. - // Must use the https:// scheme. - // - // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` - // +kubebuilder:validation:Required - // +required - URL string `json:"issuerURL"` - - // Audiences is an array of audiences that the token was issued for. - // Valid tokens must include at least one of these values in their - // "aud" claim. - // Must be set to exactly one value. - // - // +listType=set - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:MaxItems=10 - // +required - Audiences []TokenAudience `json:"audiences"` - - // CertificateAuthority is a reference to a config map in the - // configuration namespace. The .data of the configMap must contain - // the "ca-bundle.crt" key. - // If unset, system trust is used instead. - CertificateAuthority ConfigMapNameReference `json:"issuerCertificateAuthority"` -} - -type TokenClaimMappings struct { - // Username is a name of the claim that should be used to construct - // usernames for the cluster identity. - // - // Default value: "sub" - Username UsernameClaimMapping `json:"username,omitempty"` - - // Groups is a name of the claim that should be used to construct - // groups for the cluster identity. - // The referenced claim must use an array of string values.
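A sketch of a single external OIDC provider as these types model it; every name, URL, and claim here is illustrative, and the import path is the assumed upstream one.

package sketch

import configv1 "github.com/openshift/api/config/v1"

// One provider, one audience, and a CA bundle config map from the
// configuration namespace.
var provider = configv1.OIDCProvider{
	Name: "my-oidc",
	Issuer: configv1.TokenIssuer{
		URL:                  "https://oidc.example.com",
		Audiences:            []configv1.TokenAudience{"openshift"},
		CertificateAuthority: configv1.ConfigMapNameReference{Name: "oidc-ca-bundle"},
	},
	ClaimMappings: configv1.TokenClaimMappings{
		// Map usernames from the "email" claim (the default would be "sub").
		Username: configv1.UsernameClaimMapping{
			TokenClaimMapping: configv1.TokenClaimMapping{Claim: "email"},
		},
	},
}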
- Groups PrefixedClaimMapping `json:"groups,omitempty"` -} - -type TokenClaimMapping struct { - // Claim is a JWT token claim to be used in the mapping - // - // +kubebuilder:validation:Required - // +required - Claim string `json:"claim"` -} - -type OIDCClientConfig struct { - // ComponentName is the name of the component that is supposed to consume this - // client configuration - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required - // +required - ComponentName string `json:"componentName"` - - // ComponentNamespace is the namespace of the component that is supposed to consume this - // client configuration - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required - // +required - ComponentNamespace string `json:"componentNamespace"` - - // ClientID is the identifier of the OIDC client from the OIDC provider - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required - // +required - ClientID string `json:"clientID"` - - // ClientSecret refers to a secret in the `openshift-config` namespace that - // contains the client secret in the `clientSecret` key of the `.data` field - ClientSecret SecretNameReference `json:"clientSecret"` - - // ExtraScopes is an optional set of scopes to request tokens with. - // - // +listType=set - ExtraScopes []string `json:"extraScopes"` -} - -type OIDCClientStatus struct { - // ComponentName is the name of the component that will consume a client configuration. - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required - // +required - ComponentName string `json:"componentName"` - - // ComponentNamespace is the namespace of the component that will consume a client configuration. - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required - // +required - ComponentNamespace string `json:"componentNamespace"` - - // CurrentOIDCClients is a list of clients that the component is currently using. - // - // +listType=map - // +listMapKey=issuerURL - // +listMapKey=clientID - CurrentOIDCClients []OIDCClientReference `json:"currentOIDCClients"` - - // ConsumingUsers is a slice of ServiceAccounts that need to have read - // permission on the `clientSecret` secret. - // - // +kubebuilder:validation:MaxItems=5 - // +listType=set - ConsumingUsers []ConsumingUser `json:"consumingUsers"` - - // Conditions are used to communicate the state of the `oidcClients` entry. - // - // Supported conditions include Available, Degraded and Progressing. - // - // If Available is true, the component is successfully using the configured client. - // If Degraded is true, that means something has gone wrong trying to handle the client configuration. - // If Progressing is true, that means the component is taking some action related to the `oidcClients` entry. - // - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty"` -} - -type OIDCClientReference struct { - // OIDCName refers to the `name` of the provider from `oidcProviders` - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required - // +required - OIDCProviderName string `json:"oidcProviderName"` - - // URL is the serving URL of the token issuer. - // Must use the https:// scheme. 
- // - // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` - // +kubebuilder:validation:Required - // +required - IssuerURL string `json:"issuerURL"` - - // ClientID is the identifier of the OIDC client from the OIDC provider - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required - // +required - ClientID string `json:"clientID"` -} - -// +kubebuilder:validation:XValidation:rule="has(self.prefixPolicy) && self.prefixPolicy == 'Prefix' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)",message="prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" -type UsernameClaimMapping struct { - TokenClaimMapping `json:",inline"` - - // PrefixPolicy specifies how a prefix should apply. - // - // By default, claims other than `email` will be prefixed with the issuer URL to - // prevent naming clashes with other plugins. - // - // Set to "NoPrefix" to disable prefixing. - // - // Example: - // (1) `prefix` is set to "myoidc:" and `claim` is set to "username". - // If the JWT claim `username` contains value `userA`, the resulting - // mapped value will be "myoidc:userA". - // (2) `prefix` is set to "myoidc:" and `claim` is set to "email". If the - // JWT `email` claim contains value "userA@myoidc.tld", the resulting - // mapped value will be "myoidc:userA@myoidc.tld". - // (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, - // the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", - // and `claim` is set to: - // (a) "username": the mapped value will be "https://myoidc.tld#userA" - // (b) "email": the mapped value will be "userA@myoidc.tld" - // - // +kubebuilder:validation:Enum={"", "NoPrefix", "Prefix"} - PrefixPolicy UsernamePrefixPolicy `json:"prefixPolicy"` - - Prefix *UsernamePrefix `json:"prefix"` -} - -type UsernamePrefixPolicy string - -var ( - // NoOpinion lets the cluster assign prefixes. If the username claim is email, there is no prefix. - // If the username claim is anything else, it is prefixed by the issuerURL - NoOpinion UsernamePrefixPolicy = "" - - // NoPrefix means the username claim value will not have any prefix - NoPrefix UsernamePrefixPolicy = "NoPrefix" - - // Prefix means the prefix value must be specified. It cannot be empty - Prefix UsernamePrefixPolicy = "Prefix" -) - -type UsernamePrefix struct { - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +required - PrefixString string `json:"prefixString"` -} - -type PrefixedClaimMapping struct { - TokenClaimMapping `json:",inline"` - - // Prefix is a string to prefix the value from the token in the result of the - // claim mapping. - // - // By default, no prefixing occurs. - // - // Example: if `prefix` is set to "myoidc:" and the `claim` in the JWT contains - // an array of strings "a", "b" and "c", the mapping will result in an - // array of strings "myoidc:a", "myoidc:b" and "myoidc:c".
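The prefixPolicy/prefix pairing enforced by the XValidation rule above, sketched with illustrative values (assumed upstream import path):

package sketch

import configv1 "github.com/openshift/api/config/v1"

// Valid per the CEL rule above: prefixPolicy "Prefix" together with a
// non-empty prefixString. Setting the policy without the prefix, or the
// reverse, would be rejected by the API server.
var username = configv1.UsernameClaimMapping{
	TokenClaimMapping: configv1.TokenClaimMapping{Claim: "username"},
	PrefixPolicy:      configv1.Prefix,
	Prefix:            &configv1.UsernamePrefix{PrefixString: "myoidc:"},
}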
- Prefix string `json:"prefix"` -} - -type TokenValidationRuleType string - -const ( - TokenValidationRuleTypeRequiredClaim = "RequiredClaim" -) - -type TokenClaimValidationRule struct { - // Type sets the type of the validation rule - // - // +kubebuilder:validation:Enum={"RequiredClaim"} - // +kubebuilder:default="RequiredClaim" - Type TokenValidationRuleType `json:"type"` - - // RequiredClaim allows configuring a required claim name and its expected - // value - RequiredClaim *TokenRequiredClaim `json:"requiredClaim"` -} - -type TokenRequiredClaim struct { - // Claim is a name of a required claim. Only claims with string values are - // supported. - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required - // +required - Claim string `json:"claim"` - - // RequiredValue is the required value for the claim. - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required - // +required - RequiredValue string `json:"requiredValue"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go deleted file mode 100644 index dad47666..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_build.go +++ /dev/null @@ -1,133 +0,0 @@ -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Build configures the behavior of OpenShift builds for the entire cluster. -// This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds. -// -// The canonical name is "cluster" -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=openshift-controller-manager,operatorOrdering=01 -// +openshift:capability=Build -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=builds,scope=Cluster -// +kubebuilder:subresource:status -type Build struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec holds user-settable values for the build controller configuration - // +kubebuilder:validation:Required - // +required - Spec BuildSpec `json:"spec"` -} - -type BuildSpec struct { - // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that - // should be trusted for image pushes and pulls during builds. - // The namespace for this config map is openshift-config. - // - // DEPRECATED: Additional CAs for image pull and push should be set on - // image.config.openshift.io/cluster instead. 
- // - // +optional - AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"` - // BuildDefaults controls the default information for Builds - // +optional - BuildDefaults BuildDefaults `json:"buildDefaults"` - // BuildOverrides controls override settings for builds - // +optional - BuildOverrides BuildOverrides `json:"buildOverrides"` -} - -type BuildDefaults struct { - // DefaultProxy contains the default proxy settings for all build operations, including image pull/push - // and source download. - // - // Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables - // in the build config's strategy. - // +optional - DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"` - - // GitProxy contains the proxy settings for git operations only. If set, this will override - // any Proxy settings for all git commands, such as git clone. - // - // Values that are not set here will be inherited from DefaultProxy. - // +optional - GitProxy *ProxySpec `json:"gitProxy,omitempty"` - - // Env is a set of default environment variables that will be applied to the - // build if the specified variables do not exist on the build - // +optional - Env []corev1.EnvVar `json:"env,omitempty"` - - // ImageLabels is a list of docker labels that are applied to the resulting image. - // Users can override a default label by providing a label with the same name in their - // Build/BuildConfig. - // +optional - ImageLabels []ImageLabel `json:"imageLabels,omitempty"` - - // Resources defines resource requirements to execute the build. - // +optional - Resources corev1.ResourceRequirements `json:"resources"` -} - -type ImageLabel struct { - // Name defines the name of the label. It must have non-zero length. - Name string `json:"name"` - - // Value defines the literal value of the label. - // +optional - Value string `json:"value,omitempty"` -} - -type BuildOverrides struct { - // ImageLabels is a list of docker labels that are applied to the resulting image. - // If a user provided a label in their Build/BuildConfig with the same name as one in this - // list, the user's label will be overwritten. - // +optional - ImageLabels []ImageLabel `json:"imageLabels,omitempty"` - - // NodeSelector is a selector which must be true for the build pod to fit on a node - // +optional - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - - // Tolerations is a list of Tolerations that will override any existing - // tolerations set on a build pod. - // +optional - Tolerations []corev1.Toleration `json:"tolerations,omitempty"` - - // ForcePull overrides, if set, the equivalent value in the builds, - // i.e. false disables force pull for all builds, - // true enables force pull for all builds, - // independently of what each build specifies itself - // +optional - ForcePull *bool `json:"forcePull,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type BuildList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata.
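A sketch combining build defaults and overrides as the types above compose them; values are illustrative, and the generic ptr helper is defined locally rather than taken from the package.

package sketch

import (
	configv1 "github.com/openshift/api/config/v1"
	corev1 "k8s.io/api/core/v1"
)

// ptr is a local helper; openshift/api does not ship one.
func ptr[T any](v T) *T { return &v }

// Defaults fill in values a build did not set itself; overrides win over
// whatever individual builds specify (ForcePull here).
var buildSpec = configv1.BuildSpec{
	BuildDefaults: configv1.BuildDefaults{
		Env:         []corev1.EnvVar{{Name: "GIT_SSL_NO_VERIFY", Value: "false"}},
		ImageLabels: []configv1.ImageLabel{{Name: "io.example.team", Value: "platform"}},
	},
	BuildOverrides: configv1.BuildOverrides{
		ForcePull: ptr(true),
	},
}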
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []Build `json:"items"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go deleted file mode 100644 index 7951762c..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go +++ /dev/null @@ -1,227 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterOperator is the Custom Resource object which holds the current state -// of an operator. This object is used by operators to convey their state to -// the rest of the cluster. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/497 -// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=clusteroperators,scope=Cluster,shortName=co -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name=Version,JSONPath=.status.versions[?(@.name=="operator")].version,type=string,description=The version the operator is at. -// +kubebuilder:printcolumn:name=Available,JSONPath=.status.conditions[?(@.type=="Available")].status,type=string,description=Whether the operator is running and stable. -// +kubebuilder:printcolumn:name=Progressing,JSONPath=.status.conditions[?(@.type=="Progressing")].status,type=string,description=Whether the operator is processing changes. -// +kubebuilder:printcolumn:name=Degraded,JSONPath=.status.conditions[?(@.type=="Degraded")].status,type=string,description=Whether the operator is degraded. -// +kubebuilder:printcolumn:name=Since,JSONPath=.status.conditions[?(@.type=="Available")].lastTransitionTime,type=date,description=The time the operator's Available status last changed. -// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true -type ClusterOperator struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata"` - - // spec holds configuration that could apply to any operator. - // +kubebuilder:validation:Required - // +required - Spec ClusterOperatorSpec `json:"spec"` - - // status holds the information about the state of an operator. It is consistent with status information across - // the Kubernetes ecosystem. - // +optional - Status ClusterOperatorStatus `json:"status"` -} - -// ClusterOperatorSpec is empty for now, but you could imagine holding information like "pause". -type ClusterOperatorSpec struct { -} - -// ClusterOperatorStatus provides information about the status of the operator. -// +k8s:deepcopy-gen=true -type ClusterOperatorStatus struct { - // conditions describes the state of the operator's managed and monitored components. 
- // +patchMergeKey=type - // +patchStrategy=merge - // +optional - Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` - - // versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple - // operand entries in the array. Available operators must report the version of the operator itself with the name "operator". - // An operator reports a new "operator" version when it has rolled out the new version to all of its operands. - // +optional - Versions []OperandVersion `json:"versions,omitempty"` - - // relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are: - // 1. the detailed resource driving the operator - // 2. operator namespaces - // 3. operand namespaces - // +optional - RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"` - - // extension contains any additional status information specific to the - // operator which owns this status object. - // +nullable - // +optional - // +kubebuilder:pruning:PreserveUnknownFields - Extension runtime.RawExtension `json:"extension"` -} - -type OperandVersion struct { - // name is the name of the particular operand this version is for. It usually matches container images, not operators. - // +kubebuilder:validation:Required - // +required - Name string `json:"name"` - - // version indicates which version of a particular operand is currently being managed. It must always match the Available - // operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out - // 1.1.0 - // +kubebuilder:validation:Required - // +required - Version string `json:"version"` -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -type ObjectReference struct { - // group of the referent. - // +kubebuilder:validation:Required - // +required - Group string `json:"group"` - // resource of the referent. - // +kubebuilder:validation:Required - // +required - Resource string `json:"resource"` - // namespace of the referent. - // +optional - Namespace string `json:"namespace,omitempty"` - // name of the referent. - // +kubebuilder:validation:Required - // +required - Name string `json:"name"` -} - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// ClusterOperatorStatusCondition represents the state of the operator's -// managed and monitored components. -// +k8s:deepcopy-gen=true -type ClusterOperatorStatusCondition struct { - // type specifies the aspect reported by this condition. - // +kubebuilder:validation:Required - // +required - Type ClusterStatusConditionType `json:"type"` - - // status of the condition, one of True, False, Unknown. - // +kubebuilder:validation:Required - // +required - Status ConditionStatus `json:"status"` - - // lastTransitionTime is the time of the last update to the current status property.
- // +kubebuilder:validation:Required - // +required - LastTransitionTime metav1.Time `json:"lastTransitionTime"` - - // reason is the CamelCase reason for the condition's current status. - // +optional - Reason string `json:"reason,omitempty"` - - // message provides additional information about the current condition. - // This is only to be consumed by humans. It may contain Line Feed - // characters (U+000A), which should be rendered as new lines. - // +optional - Message string `json:"message,omitempty"` -} - -// ClusterStatusConditionType is an aspect of operator state. -type ClusterStatusConditionType string - -const ( - // Available indicates that the component (operator and all configured operands) - // is functional and available in the cluster. Available=False means at least - // part of the component is non-functional, and that the condition requires - // immediate administrator intervention. - OperatorAvailable ClusterStatusConditionType = "Available" - - // Progressing indicates that the component (operator and all configured operands) - // is actively rolling out new code, propagating config changes, or otherwise - // moving from one steady state to another. Operators should not report - // progressing when they are reconciling (without action) a previously known - // state. If the observed cluster state has changed and the component is - // reacting to it (scaling up for instance), Progressing should become true - // since it is moving from one steady state to another. - OperatorProgressing ClusterStatusConditionType = "Progressing" - - // Degraded indicates that the component (operator and all configured operands) - // does not match its desired state over a period of time resulting in a lower - // quality of service. The period of time may vary by component, but a Degraded - // state represents persistent observation of a condition. As a result, a - // component should not oscillate in and out of Degraded state. A component may - // be Available even if it is degraded. For example, a component may desire 3 - // running pods, but 1 pod is crash-looping. The component is Available but - // Degraded because it may have a lower quality of service. A component may be - // Progressing but not Degraded because the transition from one state to - // another does not persist over a long enough period to report Degraded. A - // component should not report Degraded during the course of a normal upgrade. - // A component may report Degraded in response to a persistent infrastructure - // failure that requires eventual administrator intervention. For example, if - // a control plane host is unhealthy and must be replaced. A component should - // report Degraded if unexpected errors occur over a period, but the - // expectation is that all unexpected errors are handled as operators mature. - OperatorDegraded ClusterStatusConditionType = "Degraded" - - // Upgradeable indicates whether the component (operator and all configured - // operands) is safe to upgrade based on the current cluster state. When - // Upgradeable is False, the cluster-version operator will prevent the - // cluster from performing impacted updates unless forced. When set on - // ClusterVersion, the message will explain which updates (minor or patch) - // are impacted. When set on ClusterOperator, False will block minor - // OpenShift updates. The message field should contain a human readable - // description of what the administrator should do to allow the cluster or - // component to successfully update.
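A sketch of how consumers typically read these condition types; the conditionStatus helper is written here for illustration and is not part of this package.

package sketch

import configv1 "github.com/openshift/api/config/v1"

// conditionStatus scans a ClusterOperator for the given condition type,
// returning Unknown when the operator does not report it at all.
func conditionStatus(co *configv1.ClusterOperator, t configv1.ClusterStatusConditionType) configv1.ConditionStatus {
	for _, c := range co.Status.Conditions {
		if c.Type == t {
			return c.Status
		}
	}
	return configv1.ConditionUnknown
}

// Usage: degraded := conditionStatus(co, configv1.OperatorDegraded) == configv1.ConditionTrue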
The cluster-version operator will - // allow updates when this condition is not False, including when it is - // missing, True, or Unknown. - OperatorUpgradeable ClusterStatusConditionType = "Upgradeable" - - // EvaluationConditionsDetected is used to indicate the result of the detection - // logic that was added to a component to evaluate the introduction of an - // invasive change that could potentially result in highly visible alerts, - // breakages or upgrade failures. You can concatenate multiple Reasons using - // the "::" delimiter if you need to evaluate the introduction of multiple changes. - EvaluationConditionsDetected ClusterStatusConditionType = "EvaluationConditionsDetected" -) - -// ClusterOperatorList is a list of OperatorStatus resources. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +openshift:compatibility-gen:level=1 -type ClusterOperatorList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []ClusterOperator `json:"items"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go deleted file mode 100644 index 707af9d8..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ /dev/null @@ -1,867 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterVersion is the configuration for the ClusterVersionOperator. This is where -// parameters related to automatic updates can be set. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/495 -// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:path=clusterversions,scope=Cluster -// +kubebuilder:validation:XValidation:rule="has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == 'None' && 'marketplace' in self.spec.capabilities.additionalEnabledCapabilities ?
'OperatorLifecycleManager' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && 'OperatorLifecycleManager' in self.status.capabilities.enabledCapabilities) : true",message="the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly nor implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability" -// +kubebuilder:printcolumn:name=Version,JSONPath=.status.history[?(@.state=="Completed")].version,type=string -// +kubebuilder:printcolumn:name=Available,JSONPath=.status.conditions[?(@.type=="Available")].status,type=string -// +kubebuilder:printcolumn:name=Progressing,JSONPath=.status.conditions[?(@.type=="Progressing")].status,type=string -// +kubebuilder:printcolumn:name=Since,JSONPath=.status.conditions[?(@.type=="Progressing")].lastTransitionTime,type=date -// +kubebuilder:printcolumn:name=Status,JSONPath=.status.conditions[?(@.type=="Progressing")].message,type=string -// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true -type ClusterVersion struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec is the desired state of the cluster version - the operator will work - // to ensure that the desired version is applied to the cluster. - // +kubebuilder:validation:Required - // +required - Spec ClusterVersionSpec `json:"spec"` - // status contains information about the available updates and any in-progress - // updates. - // +optional - Status ClusterVersionStatus `json:"status"` -} - -// ClusterVersionSpec is the desired version state of the cluster. It includes - // the version the cluster should be at, how the cluster is identified, and - // where the cluster should look for version updates. - // +k8s:deepcopy-gen=true -type ClusterVersionSpec struct { - // clusterID uniquely identifies this cluster. This is expected to be - // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in - // hexadecimal values). This is a required field. - // +kubebuilder:validation:Required - // +required - ClusterID ClusterID `json:"clusterID"` - - // desiredUpdate is an optional field that indicates the desired value of - // the cluster version. Setting this value will trigger an upgrade (if - // the current version does not match the desired version). The set of - // recommended update values is listed as part of available updates in - // status, and setting values outside that range may cause the upgrade - // to fail. - // - // Some of the fields are inter-related with restrictions and meanings described here. - // 1. image is specified, version is specified, architecture is specified. API validation error. - // 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. - // 3. image is specified, version is not specified, architecture is specified. API validation error. - // 4. image is specified, version is not specified, architecture is not specified. image is used. - // 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. - // 6.
image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. - // 7. image is not specified, version is not specified, architecture is specified. API validation error. - // 8. image is not specified, version is not specified, architecture is not specified. API validation error. - // - // If an upgrade fails the operator will halt and report status - // about the failing component. Setting the desired update value back to - // the previous version will cause a rollback to be attempted. Not all - // rollbacks will succeed. - // - // +optional - DesiredUpdate *Update `json:"desiredUpdate,omitempty"` - - // upstream may be used to specify the preferred update server. By default - // it will use the appropriate update server for the cluster and region. - // - // +optional - Upstream URL `json:"upstream,omitempty"` - // channel is an identifier for explicitly requesting that a non-default - // set of updates be applied to this cluster. The default channel will - // contain stable updates that are appropriate for production clusters. - // - // +optional - Channel string `json:"channel,omitempty"` - - // capabilities configures the installation of optional, core - // cluster components. A null value here is identical to an - // empty object; see the child properties for default semantics. - // +optional - Capabilities *ClusterVersionCapabilitiesSpec `json:"capabilities,omitempty"` - - // signatureStores contains the upstream URIs to verify release signatures and optional - // reference to a config map by name containing the PEM-encoded CA bundle. - // - // By default, CVO will use existing signature stores if this property is empty. - // The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature - // in these stores in parallel only when local ConfigMaps did not include a valid signature. - // Validation will fail if none of the signature stores reply with a valid signature before the timeout. - // Setting signatureStores will replace the default signature stores with custom signature stores. - // Default stores can be used with custom signature stores by adding them manually. - // - // A maximum of 32 signature stores may be configured. - // +kubebuilder:validation:MaxItems=32 - // +openshift:enable:FeatureGate=SignatureStores - // +listType=map - // +listMapKey=url - // +optional - SignatureStores []SignatureStore `json:"signatureStores"` - - // overrides is a list of overrides for components that are managed by - // cluster version operator. Marking a component unmanaged will prevent - // the operator from creating or updating the object. - // +listType=map - // +listMapKey=kind - // +listMapKey=group - // +listMapKey=namespace - // +listMapKey=name - // +optional - Overrides []ComponentOverride `json:"overrides,omitempty"` -} - -// ClusterVersionStatus reports the status of the cluster versioning, -// including any upgrades that are in progress. The current field will -// be set to whichever version the cluster is reconciling to, and the -// conditions array will report whether the update succeeded, is in -// progress, or is failing. -// +k8s:deepcopy-gen=true -type ClusterVersionStatus struct { - // desired is the version that the cluster is reconciling towards. - // If the cluster is not yet fully initialized desired will be set - // with the information available, which may be an image or a tag.
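To ground the desiredUpdate matrix above, a sketch of case 6 (version set, image and architecture unset); the UUID, version, and channel are illustrative.

package sketch

import configv1 "github.com/openshift/api/config/v1"

// Case 6: only version is set, so the CVO selects an image for the
// cluster's current architecture from availableUpdates.
var cvSpec = configv1.ClusterVersionSpec{
	ClusterID:     configv1.ClusterID("11111111-2222-3333-4444-555555555555"), // illustrative RFC4122 UUID
	Channel:       "stable-4.16",
	DesiredUpdate: &configv1.Update{Version: "4.16.3"},
}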
- // +kubebuilder:validation:Required - // +required - Desired Release `json:"desired"` - - // history contains a list of the most recent versions applied to the cluster. - // This value may be empty during cluster startup, and then will be updated - // when a new update is being applied. The newest update is first in the - // list and it is ordered by recency. Updates in the history have state - // Completed if the rollout completed - if an update was failing or halfway - // applied the state will be Partial. Only a limited amount of update history - // is preserved. - // +listType=atomic - // +optional - History []UpdateHistory `json:"history,omitempty"` - - // observedGeneration reports which version of the spec is being synced. - // If this value is not equal to metadata.generation, then the desired - // and conditions fields may represent a previous version. - // +kubebuilder:validation:Required - // +required - ObservedGeneration int64 `json:"observedGeneration"` - - // versionHash is a fingerprint of the content that the cluster will be - // updated with. It is used by the operator to avoid unnecessary work - // and is for internal use only. - // +kubebuilder:validation:Required - // +required - VersionHash string `json:"versionHash"` - - // capabilities describes the state of optional, core cluster components. - Capabilities ClusterVersionCapabilitiesStatus `json:"capabilities"` - - // conditions provides information about the cluster version. The condition - // "Available" is set to true if the desiredUpdate has been reached. The - // condition "Progressing" is set to true if an update is being applied. - // The condition "Degraded" is set to true if an update is currently blocked - // by a temporary or permanent error. Conditions are only valid for the - // current desiredUpdate when metadata.generation is equal to - // status.generation. - // +listType=map - // +listMapKey=type - // +patchMergeKey=type - // +patchStrategy=merge - // +optional - Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` - - // availableUpdates contains updates recommended for this - // cluster. Updates which appear in conditionalUpdates but not in - // availableUpdates may expose this cluster to known issues. This list - // may be empty if no updates are recommended, if the update service - // is unavailable, or if an invalid channel has been specified. - // +nullable - // +kubebuilder:validation:Required - // +listType=atomic - // +required - AvailableUpdates []Release `json:"availableUpdates"` - - // conditionalUpdates contains the list of updates that may be - // recommended for this cluster if it meets specific required - // conditions. Consumers interested in the set of updates that are - // actually recommended for this cluster should use - // availableUpdates. This list may be empty if no updates are - // recommended, if the update service is unavailable, or if an empty - // or invalid channel has been specified. - // +listType=atomic - // +optional - ConditionalUpdates []ConditionalUpdate `json:"conditionalUpdates,omitempty"` -} - -// UpdateState is a constant representing whether an update was successfully -// applied to the cluster or not. -type UpdateState string - -const ( - // CompletedUpdate indicates an update was successfully applied - // to the cluster (all resource updates were successful). 
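A sketch of consuming the Completed/Partial distinction from status history; the lastCompleted helper is illustrative, not part of this package.

package sketch

import configv1 "github.com/openshift/api/config/v1"

// lastCompleted returns the newest fully applied version, if any; history
// is ordered newest-first, so the first Completed entry wins.
func lastCompleted(st configv1.ClusterVersionStatus) (string, bool) {
	for _, h := range st.History {
		if h.State == configv1.CompletedUpdate {
			return h.Version, true
		}
	}
	return "", false
}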
- CompletedUpdate UpdateState = "Completed" - // PartialUpdate indicates an update was never completely applied - // or is currently being applied. - PartialUpdate UpdateState = "Partial" -) - -// UpdateHistory is a single attempted update to the cluster. -type UpdateHistory struct { - // state reflects whether the update was fully applied. The Partial state - // indicates the update is not fully applied, while the Completed state - // indicates the update was successfully rolled out at least once (all - // parts of the update successfully applied). - // +kubebuilder:validation:Required - // +required - State UpdateState `json:"state"` - - // startedTime is the time at which the update was started. - // +kubebuilder:validation:Required - // +required - StartedTime metav1.Time `json:"startedTime"` - - // completionTime, if set, is when the update was fully applied. The update - // that is currently being applied will have a null completion time. - // Completion time will always be set for entries that are not the current - // update (usually to the started time of the next update). - // +kubebuilder:validation:Required - // +required - // +nullable - CompletionTime *metav1.Time `json:"completionTime"` - - // version is a semantic version identifying the update version. If the - // requested image does not define a version, or if a failure occurs - // retrieving the image, this value may be empty. - // - // +optional - Version string `json:"version"` - - // image is a container image location that contains the update. This value - // is always populated. - // +kubebuilder:validation:Required - // +required - Image string `json:"image"` - - // verified indicates whether the provided update was properly verified - // before it was installed. If this is false the cluster may not be trusted. - // Verified does not cover upgradeable checks that depend on the cluster - // state at the time when the update target was accepted. - // +kubebuilder:validation:Required - // +required - Verified bool `json:"verified"` - - // acceptedRisks records risks which were accepted to initiate the update. - // For example, it may mention an Upgradeable=False or missing signature - // that was overridden via desiredUpdate.force, or an update that was - // initiated despite not being in the availableUpdates set of recommended - // update targets. - // +optional - AcceptedRisks string `json:"acceptedRisks,omitempty"` -} - -// ClusterID is a string RFC4122 uuid. -type ClusterID string - -// ClusterVersionArchitecture enumerates valid cluster architectures. -// +kubebuilder:validation:Enum="Multi";"" -type ClusterVersionArchitecture string - -const ( - // ClusterVersionArchitectureMulti identifies a multi architecture. A multi - // architecture cluster is capable of running nodes with multiple architectures. - ClusterVersionArchitectureMulti ClusterVersionArchitecture = "Multi" -) - -// ClusterVersionCapability enumerates optional, core cluster components. -// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager;CloudCredential;Ingress;CloudControllerManager -type ClusterVersionCapability string - -const ( - // ClusterVersionCapabilityOpenShiftSamples manages the sample - // image streams and templates stored in the openshift - // namespace, and any registry credentials, stored as a secret, - // needed for the image streams to import the images they - // reference.
- ClusterVersionCapabilityOpenShiftSamples ClusterVersionCapability = "openshift-samples" - - // ClusterVersionCapabilityBaremetal manages the cluster - // baremetal operator which is responsible for running the metal3 - // deployment. - ClusterVersionCapabilityBaremetal ClusterVersionCapability = "baremetal" - - // ClusterVersionCapabilityMarketplace manages the Marketplace operator which - // supplies Operator Lifecycle Manager (OLM) users with default catalogs of - // "optional" operators. - // - // Note that Marketplace has a hard requirement on OLM. OLM can not be disabled - // while Marketplace is enabled. - ClusterVersionCapabilityMarketplace ClusterVersionCapability = "marketplace" - - // ClusterVersionCapabilityConsole manages the Console operator which - // installs and maintains the web console. - ClusterVersionCapabilityConsole ClusterVersionCapability = "Console" - - // ClusterVersionCapabilityInsights manages the Insights operator which - // collects anonymized information about the cluster to generate - // recommendations for possible cluster issues. - ClusterVersionCapabilityInsights ClusterVersionCapability = "Insights" - - // ClusterVersionCapabilityStorage manages the storage operator which - // is responsible for providing cluster-wide storage defaults - // WARNING: Do not disable this capability when deployed to - // RHEV and OpenStack without reading the docs. - // These clusters heavily rely on that capability and disabling it may cause - // damage to the cluster. - ClusterVersionCapabilityStorage ClusterVersionCapability = "Storage" - - // ClusterVersionCapabilityCSISnapshot manages the csi snapshot - // controller operator which is responsible for watching the - // VolumeSnapshot CRD objects and managing the creation and deletion - // lifecycle of volume snapshots - ClusterVersionCapabilityCSISnapshot ClusterVersionCapability = "CSISnapshot" - - // ClusterVersionCapabilityNodeTuning manages the Node Tuning Operator - // which is responsible for watching the Tuned and Profile CRD - // objects and manages the containerized TuneD daemon which controls - // system level tuning of Nodes - ClusterVersionCapabilityNodeTuning ClusterVersionCapability = "NodeTuning" - - // ClusterVersionCapabilityMachineAPI manages - // machine-api-operator - // cluster-autoscaler-operator - // cluster-control-plane-machine-set-operator - // which are responsible for machine configuration and are heavily - // targeted at SNO clusters. - // - // The following CRDs are disabled as well - // machines - // machineset - // controlplanemachineset - // - // WARNING: Do not disable that capability without reading the - // documentation. This is an important part of the OpenShift system - // and disabling it may cause cluster damage - ClusterVersionCapabilityMachineAPI ClusterVersionCapability = "MachineAPI" - - // ClusterVersionCapabilityBuild manages the Build API which is responsible - // for watching the Build API objects and managing their lifecycle. - // The functionality is located under openshift-apiserver and openshift-controller-manager. - // - // The following resources are taken into account: - // - builds - // - buildconfigs - ClusterVersionCapabilityBuild ClusterVersionCapability = "Build" - - // ClusterVersionCapabilityDeploymentConfig manages the DeploymentConfig API - // which is responsible for watching the DeploymentConfig API and managing its lifecycle. - // The functionality is located under openshift-apiserver and openshift-controller-manager.
- // - // The following resources are taken into account: - // - deploymentconfigs - ClusterVersionCapabilityDeploymentConfig ClusterVersionCapability = "DeploymentConfig" - - // ClusterVersionCapabilityImageRegistry manages the image registry which - // allows the distribution of Docker images - ClusterVersionCapabilityImageRegistry ClusterVersionCapability = "ImageRegistry" - - // ClusterVersionCapabilityOperatorLifecycleManager manages the Operator Lifecycle Manager - // which itself manages the lifecycle of operators - ClusterVersionCapabilityOperatorLifecycleManager ClusterVersionCapability = "OperatorLifecycleManager" - - // ClusterVersionCapabilityCloudCredential manages credentials for cloud providers - // in the OpenShift cluster - ClusterVersionCapabilityCloudCredential ClusterVersionCapability = "CloudCredential" - - // ClusterVersionCapabilityIngress manages the cluster ingress operator - // which is responsible for running the ingress controllers (including OpenShift router). - // - // The following CRDs are part of the capability as well: - // IngressController - // DNSRecord - // GatewayClass - // Gateway - // HTTPRoute - // ReferenceGrant - // - // WARNING: This capability cannot be disabled on standalone OpenShift. - ClusterVersionCapabilityIngress ClusterVersionCapability = "Ingress" - - // ClusterVersionCapabilityCloudControllerManager manages various Cloud Controller - // Managers deployed on top of OpenShift. They help you work with the cloud - // provider API and embed cloud-specific control logic. - ClusterVersionCapabilityCloudControllerManager ClusterVersionCapability = "CloudControllerManager" -) - -// KnownClusterVersionCapabilities includes all known optional, core cluster components. -var KnownClusterVersionCapabilities = []ClusterVersionCapability{ - ClusterVersionCapabilityBaremetal, - ClusterVersionCapabilityConsole, - ClusterVersionCapabilityInsights, - ClusterVersionCapabilityMarketplace, - ClusterVersionCapabilityStorage, - ClusterVersionCapabilityOpenShiftSamples, - ClusterVersionCapabilityCSISnapshot, - ClusterVersionCapabilityNodeTuning, - ClusterVersionCapabilityMachineAPI, - ClusterVersionCapabilityBuild, - ClusterVersionCapabilityDeploymentConfig, - ClusterVersionCapabilityImageRegistry, - ClusterVersionCapabilityOperatorLifecycleManager, - ClusterVersionCapabilityCloudCredential, - ClusterVersionCapabilityIngress, - ClusterVersionCapabilityCloudControllerManager, -} - -// ClusterVersionCapabilitySet defines sets of cluster version capabilities. -// +kubebuilder:validation:Enum=None;v4.11;v4.12;v4.13;v4.14;v4.15;v4.16;vCurrent -type ClusterVersionCapabilitySet string - -const ( - // ClusterVersionCapabilitySetNone is an empty set enabling - // no optional capabilities. - ClusterVersionCapabilitySetNone ClusterVersionCapabilitySet = "None" - - // ClusterVersionCapabilitySet4_11 is the recommended set of - // optional capabilities to enable for the 4.11 version of - // OpenShift. This list will remain the same no matter which - // version of OpenShift is installed. - ClusterVersionCapabilitySet4_11 ClusterVersionCapabilitySet = "v4.11" - - // ClusterVersionCapabilitySet4_12 is the recommended set of - // optional capabilities to enable for the 4.12 version of - // OpenShift. This list will remain the same no matter which - // version of OpenShift is installed.
- ClusterVersionCapabilitySet4_12 ClusterVersionCapabilitySet = "v4.12" - - // ClusterVersionCapabilitySet4_13 is the recommended set of - // optional capabilities to enable for the 4.13 version of - // OpenShift. This list will remain the same no matter which - // version of OpenShift is installed. - ClusterVersionCapabilitySet4_13 ClusterVersionCapabilitySet = "v4.13" - - // ClusterVersionCapabilitySet4_14 is the recommended set of - // optional capabilities to enable for the 4.14 version of - // OpenShift. This list will remain the same no matter which - // version of OpenShift is installed. - ClusterVersionCapabilitySet4_14 ClusterVersionCapabilitySet = "v4.14" - - // ClusterVersionCapabilitySet4_15 is the recommended set of - // optional capabilities to enable for the 4.15 version of - // OpenShift. This list will remain the same no matter which - // version of OpenShift is installed. - ClusterVersionCapabilitySet4_15 ClusterVersionCapabilitySet = "v4.15" - - // ClusterVersionCapabilitySet4_16 is the recommended set of - // optional capabilities to enable for the 4.16 version of - // OpenShift. This list will remain the same no matter which - // version of OpenShift is installed. - ClusterVersionCapabilitySet4_16 ClusterVersionCapabilitySet = "v4.16" - - // ClusterVersionCapabilitySetCurrent is the recommended set - // of optional capabilities to enable for the cluster's - // current version of OpenShift. - ClusterVersionCapabilitySetCurrent ClusterVersionCapabilitySet = "vCurrent" -) - -// ClusterVersionCapabilitySets defines sets of cluster version capabilities. -var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVersionCapability{ - ClusterVersionCapabilitySetNone: {}, - ClusterVersionCapabilitySet4_11: { - ClusterVersionCapabilityBaremetal, - ClusterVersionCapabilityMarketplace, - ClusterVersionCapabilityOpenShiftSamples, - ClusterVersionCapabilityMachineAPI, - }, - ClusterVersionCapabilitySet4_12: { - ClusterVersionCapabilityBaremetal, - ClusterVersionCapabilityConsole, - ClusterVersionCapabilityInsights, - ClusterVersionCapabilityMarketplace, - ClusterVersionCapabilityStorage, - ClusterVersionCapabilityOpenShiftSamples, - ClusterVersionCapabilityCSISnapshot, - ClusterVersionCapabilityMachineAPI, - }, - ClusterVersionCapabilitySet4_13: { - ClusterVersionCapabilityBaremetal, - ClusterVersionCapabilityConsole, - ClusterVersionCapabilityInsights, - ClusterVersionCapabilityMarketplace, - ClusterVersionCapabilityStorage, - ClusterVersionCapabilityOpenShiftSamples, - ClusterVersionCapabilityCSISnapshot, - ClusterVersionCapabilityNodeTuning, - ClusterVersionCapabilityMachineAPI, - }, - ClusterVersionCapabilitySet4_14: { - ClusterVersionCapabilityBaremetal, - ClusterVersionCapabilityConsole, - ClusterVersionCapabilityInsights, - ClusterVersionCapabilityMarketplace, - ClusterVersionCapabilityStorage, - ClusterVersionCapabilityOpenShiftSamples, - ClusterVersionCapabilityCSISnapshot, - ClusterVersionCapabilityNodeTuning, - ClusterVersionCapabilityMachineAPI, - ClusterVersionCapabilityBuild, - ClusterVersionCapabilityDeploymentConfig, - ClusterVersionCapabilityImageRegistry, - }, - ClusterVersionCapabilitySet4_15: { - ClusterVersionCapabilityBaremetal, - ClusterVersionCapabilityConsole, - ClusterVersionCapabilityInsights, - ClusterVersionCapabilityMarketplace, - ClusterVersionCapabilityStorage, - ClusterVersionCapabilityOpenShiftSamples, - ClusterVersionCapabilityCSISnapshot, - ClusterVersionCapabilityNodeTuning, - ClusterVersionCapabilityMachineAPI, - 
ClusterVersionCapabilityBuild, - ClusterVersionCapabilityDeploymentConfig, - ClusterVersionCapabilityImageRegistry, - ClusterVersionCapabilityOperatorLifecycleManager, - ClusterVersionCapabilityCloudCredential, - }, - ClusterVersionCapabilitySet4_16: { - ClusterVersionCapabilityBaremetal, - ClusterVersionCapabilityConsole, - ClusterVersionCapabilityInsights, - ClusterVersionCapabilityMarketplace, - ClusterVersionCapabilityStorage, - ClusterVersionCapabilityOpenShiftSamples, - ClusterVersionCapabilityCSISnapshot, - ClusterVersionCapabilityNodeTuning, - ClusterVersionCapabilityMachineAPI, - ClusterVersionCapabilityBuild, - ClusterVersionCapabilityDeploymentConfig, - ClusterVersionCapabilityImageRegistry, - ClusterVersionCapabilityOperatorLifecycleManager, - ClusterVersionCapabilityCloudCredential, - ClusterVersionCapabilityIngress, - ClusterVersionCapabilityCloudControllerManager, - }, - ClusterVersionCapabilitySetCurrent: { - ClusterVersionCapabilityBaremetal, - ClusterVersionCapabilityConsole, - ClusterVersionCapabilityInsights, - ClusterVersionCapabilityMarketplace, - ClusterVersionCapabilityStorage, - ClusterVersionCapabilityOpenShiftSamples, - ClusterVersionCapabilityCSISnapshot, - ClusterVersionCapabilityNodeTuning, - ClusterVersionCapabilityMachineAPI, - ClusterVersionCapabilityBuild, - ClusterVersionCapabilityDeploymentConfig, - ClusterVersionCapabilityImageRegistry, - ClusterVersionCapabilityOperatorLifecycleManager, - ClusterVersionCapabilityCloudCredential, - ClusterVersionCapabilityIngress, - ClusterVersionCapabilityCloudControllerManager, - }, -} - -// ClusterVersionCapabilitiesSpec selects the managed set of -// optional, core cluster components. -// +k8s:deepcopy-gen=true -type ClusterVersionCapabilitiesSpec struct { - // baselineCapabilitySet selects an initial set of - // optional capabilities to enable, which can be extended via - // additionalEnabledCapabilities. If unset, the cluster will - // choose a default, and the default may change over time. - // The current default is vCurrent. - // +optional - BaselineCapabilitySet ClusterVersionCapabilitySet `json:"baselineCapabilitySet,omitempty"` - - // additionalEnabledCapabilities extends the set of managed - // capabilities beyond the baseline defined in - // baselineCapabilitySet. The default is an empty set. - // +listType=atomic - // +optional - AdditionalEnabledCapabilities []ClusterVersionCapability `json:"additionalEnabledCapabilities,omitempty"` -} - -// ClusterVersionCapabilitiesStatus describes the state of optional, -// core cluster components. -// +k8s:deepcopy-gen=true -type ClusterVersionCapabilitiesStatus struct { - // enabledCapabilities lists all the capabilities that are currently managed. - // +listType=atomic - // +optional - EnabledCapabilities []ClusterVersionCapability `json:"enabledCapabilities,omitempty"` - - // knownCapabilities lists all the capabilities known to the current cluster. - // +listType=atomic - // +optional - KnownCapabilities []ClusterVersionCapability `json:"knownCapabilities,omitempty"` -} - -// ComponentOverride allows overriding cluster version operator's behavior -// for a component. -// +k8s:deepcopy-gen=true -type ComponentOverride struct { - // kind identifies which object to override. - // +kubebuilder:validation:Required - // +required - Kind string `json:"kind"` - // group identifies the API group that the kind is in. - // +kubebuilder:validation:Required - // +required - Group string `json:"group"` - - // namespace is the component's namespace. If the resource is cluster - // scoped, the namespace should be empty. - // +kubebuilder:validation:Required - // +required - Namespace string `json:"namespace"` - // name is the component's name. - // +kubebuilder:validation:Required - // +required - Name string `json:"name"` - - // unmanaged controls if the cluster version operator should stop managing the - // resources in this cluster. - // Default: false - // +kubebuilder:validation:Required - // +required - Unmanaged bool `json:"unmanaged"` -} - -// URL is a thin wrapper around string that ensures the string is a valid URL. -type URL string - -// Update represents an administrator update request. -// +kubebuilder:validation:XValidation:rule="has(self.architecture) && has(self.image) ? (self.architecture == '' || self.image == '') : true",message="cannot set both Architecture and Image" -// +kubebuilder:validation:XValidation:rule="has(self.architecture) && self.architecture != '' ? self.version != '' : true",message="Version must be set if Architecture is set" -// +k8s:deepcopy-gen=true -type Update struct { - // architecture is an optional field that indicates the desired - // value of the cluster architecture. In this context cluster - // architecture means either a single architecture or a multi - // architecture. architecture can only be set to Multi thereby - // only allowing updates from single to multi architecture. If - // architecture is set, image cannot be set and version must be - // set. - // Valid values are 'Multi' and empty. - // - // +optional - Architecture ClusterVersionArchitecture `json:"architecture"` - - // version is a semantic version identifying the update version. - // version is ignored if image is specified and required if - // architecture is specified. - // - // +optional - Version string `json:"version"` - - // image is a container image location that contains the update. - // image should be used when the desired version does not exist in availableUpdates or history. - // When image is set, version is ignored and should be empty. - // When image is set, architecture cannot be specified. - // - // +optional - Image string `json:"image"` - - // force allows an administrator to update to an image that has failed - // verification or upgradeable checks. This option should only - // be used when the authenticity of the provided image has been verified out - // of band because the provided image will run with full administrative access - // to the cluster. Do not use this flag with images that come from unknown - // or potentially malicious sources. - // - // +optional - Force bool `json:"force"` -} - -// Release represents an OpenShift release image and associated metadata. -// +k8s:deepcopy-gen=true -type Release struct { - // version is a semantic version identifying the update version. When this - // field is part of spec, version is optional if image is specified. - // +required - Version string `json:"version"` - - // image is a container image location that contains the update. When this - // field is part of spec, image is optional if version is specified and the - // availableUpdates field contains a matching version. - // +required - Image string `json:"image"` - - // url contains information about this release. This URL is set by - // the 'url' metadata property on a release or the metadata returned by - // the update API and should be displayed as a link in user - // interfaces. The URL field may not be set for test or nightly - // releases.
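As an aside, the Update type above encodes mutually exclusive intents: set image for a digest-pinned update, or version (plus, optionally, architecture) for an update served by availableUpdates. A minimal sketch of both shapes follows; it assumes the package's canonical import path github.com/openshift/api/config/v1 and the ClusterVersionArchitectureMulti ("Multi") constant defined alongside these types, and the image digest is a placeholder, not a real release.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// Digest-pinned update: version is ignored and architecture must be unset.
	byImage := configv1.Update{
		Image: "quay.io/openshift-release-dev/ocp-release@sha256:<placeholder>",
		Force: false, // set true only when the image was verified out of band
	}

	// Single-to-multi architecture update: version is required, image must be empty.
	// ClusterVersionArchitectureMulti is assumed to be the "Multi" constant.
	toMulti := configv1.Update{
		Architecture: configv1.ClusterVersionArchitectureMulti,
		Version:      "4.16.0",
	}

	fmt.Println(byImage.Image, toMulti.Version)
}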
- // +optional - URL URL `json:"url,omitempty"` - - // channels is the set of Cincinnati channels to which the release - // currently belongs. - // +listType=set - // +optional - Channels []string `json:"channels,omitempty"` -} - -// RetrievedUpdates reports whether available updates have been retrieved from -// the upstream update server. The condition is Unknown before retrieval, False -// if the updates could not be retrieved or recently failed, or True if the -// availableUpdates field is accurate and recent. -const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates" - -// ConditionalUpdate represents an update which is recommended to some -// clusters on the version the current cluster is reconciling, but which -// may not be recommended for the current cluster. -type ConditionalUpdate struct { - // release is the target of the update. - // +kubebuilder:validation:Required - // +required - Release Release `json:"release"` - - // risks represents the range of issues associated with - // updating to the target release. The cluster-version - // operator will evaluate all entries, and only recommend the - // update if there is at least one entry and all entries - // recommend the update. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinItems=1 - // +patchMergeKey=name - // +patchStrategy=merge - // +listType=map - // +listMapKey=name - // +required - Risks []ConditionalUpdateRisk `json:"risks" patchStrategy:"merge" patchMergeKey:"name"` - - // conditions represents the observations of the conditional update's - // current status. Known types are: - // * Recommended, for whether the update is recommended for the current cluster. - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -} - -// ConditionalUpdateRisk represents a reason and cluster-state -// for not recommending a conditional update. -// +k8s:deepcopy-gen=true -type ConditionalUpdateRisk struct { - // url contains information about this risk. - // +kubebuilder:validation:Required - // +kubebuilder:validation:Format=uri - // +kubebuilder:validation:MinLength=1 - // +required - URL string `json:"url"` - - // name is the CamelCase reason for not recommending a - // conditional update, in the event that matchingRules match the - // cluster state. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +required - Name string `json:"name"` - - // message provides additional information about the risk of - // updating, in the event that matchingRules match the cluster - // state. This is only to be consumed by humans. It may - // contain Line Feed characters (U+000A), which should be - // rendered as new lines. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +required - Message string `json:"message"` - - // matchingRules is a slice of conditions for deciding which - // clusters match the risk and which do not. The slice is - // ordered by decreasing precedence. The cluster-version - // operator will walk the slice in order, and stop after the - // first it can successfully evaluate. If no condition can be - // successfully evaluated, the update will not be recommended. 
- // +kubebuilder:validation:Required - // +kubebuilder:validation:MinItems=1 - // +listType=atomic - // +required - MatchingRules []ClusterCondition `json:"matchingRules"` -} - -// ClusterCondition is a union of typed cluster conditions. The 'type' -// property determines which of the type-specific properties are relevant. -// When evaluated on a cluster, the condition may match, not match, or -// fail to evaluate. -// +k8s:deepcopy-gen=true -type ClusterCondition struct { - // type represents the cluster-condition type. This defines - // the members and semantics of any additional properties. - // +kubebuilder:validation:Required - // +kubebuilder:validation:Enum={"Always","PromQL"} - // +required - Type string `json:"type"` - - // promQL represents a cluster condition based on PromQL. - // +optional - PromQL *PromQLClusterCondition `json:"promql,omitempty"` -} - -// PromQLClusterCondition represents a cluster condition based on PromQL. -type PromQLClusterCondition struct { - // PromQL is a PromQL query classifying clusters. This query - // should return a 1 in the match case and a 0 in the - // does-not-match case. Queries which return no time - // series, or which return values besides 0 or 1, are - // evaluation failures. - // +kubebuilder:validation:Required - // +required - PromQL string `json:"promql"` -} - -// ClusterVersionList is a list of ClusterVersion resources. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +openshift:compatibility-gen:level=1 -type ClusterVersionList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []ClusterVersion `json:"items"` -} - -// SignatureStore represents the URL of a custom signature store -type SignatureStore struct { - - // url contains the upstream custom signature store URL. - // url should be a valid absolute http/https URI of an upstream signature store as per rfc1738. - // This must be provided and cannot be empty. - // - // +kubebuilder:validation:Type=string - // +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL" - // +kubebuilder:validation:Required - URL string `json:"url"` - - // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. - // It is used as a trust anchor to validate the TLS certificate presented by the remote server. - // The key "ca.crt" is used to locate the data. - // If specified and the config map or expected key is not found, the signature store is not honored. - // If the specified ca data is not valid, the signature store is not honored. - // If empty, we fall back to the CA configured via Proxy, which is appended to the default system roots. - // The namespace for this config map is openshift-config.
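To illustrate the matchingRules contract described above (walk the rules in decreasing precedence, stop at the first rule that can be evaluated, and treat total evaluation failure as not-recommended), here is a sketch; it is not the cluster-version operator's actual code, evalPromQL is a hypothetical stand-in for a real PromQL client, and the import path is the assumed canonical one.

package riskeval

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// evalPromQL is hypothetical: a real implementation would run the query and
// report whether it returned 1 (match) or 0 (no match).
func evalPromQL(query string) (bool, error) {
	return false, fmt.Errorf("not wired to a Prometheus client")
}

// riskMatches walks the rules in order of decreasing precedence and stops at
// the first rule it can successfully evaluate. If no rule can be evaluated,
// the update must not be recommended, surfaced here as an error.
func riskMatches(rules []configv1.ClusterCondition) (bool, error) {
	for _, rule := range rules {
		switch rule.Type {
		case "Always":
			return true, nil
		case "PromQL":
			if rule.PromQL == nil {
				continue // cannot evaluate this entry; try the next rule
			}
			if match, err := evalPromQL(rule.PromQL.PromQL); err == nil {
				return match, nil
			}
		}
	}
	return false, fmt.Errorf("no matching rule could be evaluated")
}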
- // +optional - CA ConfigMapNameReference `json:"ca"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go deleted file mode 100644 index e8f197b3..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_console.go +++ /dev/null @@ -1,81 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Console holds cluster-wide configuration for the web console, including the -// logout URL, and reports the public URL of the console. The canonical name is -// `cluster`. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=consoles,scope=Cluster -// +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type Console struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec ConsoleSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status ConsoleStatus `json:"status"` -} - -// ConsoleSpec is the specification of the desired behavior of the Console. -type ConsoleSpec struct { - // +optional - Authentication ConsoleAuthentication `json:"authentication"` -} - -// ConsoleStatus defines the observed status of the Console. -type ConsoleStatus struct { - // The URL for the console. This will be derived from the host for the route that - // is created for the console. - ConsoleURL string `json:"consoleURL"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ConsoleList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []Console `json:"items"` -} - -// ConsoleAuthentication defines a list of optional configuration for console authentication. -type ConsoleAuthentication struct { - // An optional, absolute URL to redirect web browsers to after logging out of - // the console. If not specified, it will redirect to the default login page. - // This is required when using an identity provider that supports single - // sign-on (SSO) such as: - // - OpenID (Keycloak, Azure) - // - RequestHeader (GSSAPI, SSPI, SAML) - // - OAuth (GitHub, GitLab, Google) - // Logging out of the console will destroy the user's token. The logoutRedirect - // provides the user the option to perform single logout (SLO) through the identity - // provider to destroy their single sign-on session. 
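By way of example, the logoutRedirect field declared just below might be wired up as in the following sketch; the object name `cluster` comes from the type's doc comment, while the SSO URL is invented and the import paths are the assumed canonical ones.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

// consoleWithSLO returns a Console config that sends browsers to a
// hypothetical identity-provider logout endpoint after console logout.
func consoleWithSLO() configv1.Console {
	return configv1.Console{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, // canonical name
		Spec: configv1.ConsoleSpec{
			Authentication: configv1.ConsoleAuthentication{
				LogoutRedirect: "https://sso.example.com/logout",
			},
		},
	}
}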
- // +optional - // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$` - LogoutRedirect string `json:"logoutRedirect,omitempty"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go deleted file mode 100644 index 5daa5d78..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_dns.go +++ /dev/null @@ -1,141 +0,0 @@ -package v1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DNS holds cluster-wide information about DNS. The canonical name is `cluster` -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=dnses,scope=Cluster -// +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type DNS struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec DNSSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status DNSStatus `json:"status"` -} - -type DNSSpec struct { - // baseDomain is the base domain of the cluster. All managed DNS records will - // be sub-domains of this base. - // - // For example, given the base domain `openshift.example.com`, an API server - // DNS record may be created for `cluster-api.openshift.example.com`. - // - // Once set, this field cannot be changed. - BaseDomain string `json:"baseDomain"` - // publicZone is the location where all the DNS records that are publicly accessible to - // the internet exist. - // - // If this field is nil, no public records should be created. - // - // Once set, this field cannot be changed. - // - // +optional - PublicZone *DNSZone `json:"publicZone,omitempty"` - // privateZone is the location where all the DNS records that are only available internally - // to the cluster exist. - // - // If this field is nil, no private records should be created. - // - // Once set, this field cannot be changed. - // - // +optional - PrivateZone *DNSZone `json:"privateZone,omitempty"` - // platform holds configuration specific to the underlying - // infrastructure provider for DNS. - // When omitted, this means the user has no opinion and the platform is left - // to choose reasonable defaults. These defaults are subject to change over time. - // +optional - Platform DNSPlatformSpec `json:"platform,omitempty"` -} - -// DNSZone is used to define a DNS hosted zone. -// A zone can be identified by an ID or tags. -type DNSZone struct { - // id is the identifier that can be used to find the DNS hosted zone. 
- // - // on AWS, a zone can be fetched using `ID` as the id in [1]; - // on Azure, a zone can be fetched using `ID` as a pre-determined name in [2]; - // on GCP, a zone can be fetched using `ID` as a pre-determined name in [3]. - // - // [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - // [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - // [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get - // +optional - ID string `json:"id,omitempty"` - - // tags can be used to query the DNS hosted zone. - // - // on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters. - // - // [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options - // +optional - Tags map[string]string `json:"tags,omitempty"` -} - -type DNSStatus struct { - // dnsSuffix (service-ca amongst others) -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type DNSList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []DNS `json:"items"` -} - -// DNSPlatformSpec holds cloud-provider-specific configuration -// for DNS administration. -// +union -// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'AWS' ? has(self.aws) : !has(self.aws)",message="aws configuration is required when platform is AWS, and forbidden otherwise" -type DNSPlatformSpec struct { - // type is the underlying infrastructure provider for the cluster. - // Allowed values: "", "AWS". - // - // Individual components may not support all platforms, - // and must handle unrecognized platforms with best-effort defaults. - // - // +unionDiscriminator - // +kubebuilder:validation:Required - // +kubebuilder:validation:XValidation:rule="self in ['','AWS']",message="allowed values are '' and 'AWS'" - Type PlatformType `json:"type"` - - // aws contains DNS configuration specific to the Amazon Web Services cloud provider. - // +optional - AWS *AWSDNSSpec `json:"aws"` -} - -// AWSDNSSpec contains DNS configuration specific to the Amazon Web Services cloud provider. -type AWSDNSSpec struct { - // privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing - // operations on the cluster's private hosted zone specified in the cluster DNS config. - // When left empty, no role should be assumed. - // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$` - // +optional - PrivateZoneIAMRole string `json:"privateZoneIAMRole"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go deleted file mode 100644 index 99d99f5b..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ /dev/null @@ -1,152 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Feature holds cluster-wide information about feature gates.
The canonical name is `cluster` -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=featuregates,scope=Cluster -// +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type FeatureGate struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - // +kubebuilder:validation:XValidation:rule="has(oldSelf.featureSet) ? has(self.featureSet) : true",message=".spec.featureSet cannot be removed" - Spec FeatureGateSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status FeatureGateStatus `json:"status"` -} - -type FeatureSet string - -var ( - // Default feature set that allows upgrades. - Default FeatureSet = "" - - // TechPreviewNoUpgrade turns on tech preview features that are not part of the normal supported platform. Turning - // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES. - TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade" - - // DevPreviewNoUpgrade turns on dev preview features that are not part of the normal supported platform. Turning - // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES. - DevPreviewNoUpgrade FeatureSet = "DevPreviewNoUpgrade" - - // CustomNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. - // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations, - // your cluster may fail in an unrecoverable way. - CustomNoUpgrade FeatureSet = "CustomNoUpgrade" - - // AllFixedFeatureSets are the feature sets that have a known list of feature gates. CustomNoUpgrade, for instance, does not; LatencySensitive is no longer used. - AllFixedFeatureSets = []FeatureSet{Default, TechPreviewNoUpgrade, DevPreviewNoUpgrade} -) - -type FeatureGateSpec struct { - FeatureGateSelection `json:",inline"` -} - -// +union -type FeatureGateSelection struct { - // featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. - // Turning on or off features may cause irreversible changes in your cluster which cannot be undone. - // +unionDiscriminator - // +optional - // +kubebuilder:validation:XValidation:rule="oldSelf == 'CustomNoUpgrade' ? self == 'CustomNoUpgrade' : true",message="CustomNoUpgrade may not be changed" - // +kubebuilder:validation:XValidation:rule="oldSelf == 'TechPreviewNoUpgrade' ? self == 'TechPreviewNoUpgrade' : true",message="TechPreviewNoUpgrade may not be changed" - // +kubebuilder:validation:XValidation:rule="oldSelf == 'DevPreviewNoUpgrade' ? self == 'DevPreviewNoUpgrade' : true",message="DevPreviewNoUpgrade may not be changed" - FeatureSet FeatureSet `json:"featureSet,omitempty"` - - // customNoUpgrade allows the enabling or disabling of any feature.
Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. - // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations, - // your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" to use this field. - // +optional - // +nullable - CustomNoUpgrade *CustomFeatureGates `json:"customNoUpgrade,omitempty"` -} - -type CustomFeatureGates struct { - // enabled is a list of all feature gates that you want to force on - // +optional - Enabled []FeatureGateName `json:"enabled,omitempty"` - // disabled is a list of all feature gates that you want to force off - // +optional - Disabled []FeatureGateName `json:"disabled,omitempty"` -} - -// FeatureGateName is a string to enforce patterns on the name of a FeatureGate -// +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$` -type FeatureGateName string - -type FeatureGateStatus struct { - // conditions represent the observations of the current state. - // Known .status.conditions.type are: "DeterminationDegraded" - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty"` - - // featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion. - // Operators other than the CVO and cluster-config-operator must read the .status.featureGates, locate - // the version they are managing, find the enabled/disabled featuregates and make the operand and operator match. - // The enabled/disabled values for a particular version may change during the life of the cluster as various - // .spec.featureSet values are selected. - // Operators may choose to restart their processes to pick up these changes, but remembering past enable/disable - // lists is beyond the scope of this API and is the responsibility of individual operators. - // Only featureGates with .version in the ClusterVersion.status will be present in this list. - // +listType=map - // +listMapKey=version - FeatureGates []FeatureGateDetails `json:"featureGates"` -} - -type FeatureGateDetails struct { - // version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field. - // +kubebuilder:validation:Required - // +required - Version string `json:"version"` - // enabled is a list of all feature gates that are enabled in the cluster for the named version. - // +optional - Enabled []FeatureGateAttributes `json:"enabled"` - // disabled is a list of all feature gates that are disabled in the cluster for the named version. - // +optional - Disabled []FeatureGateAttributes `json:"disabled"` -} - -type FeatureGateAttributes struct { - // name is the name of the FeatureGate. - // +kubebuilder:validation:Required - Name FeatureGateName `json:"name"` - - // possible (probable?) future additions include - // 1. support level (Stable, ServiceDeliveryOnly, TechPreview, DevPreview) - // 2. description -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type FeatureGateList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []FeatureGate `json:"items"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go deleted file mode 100644 index a344086c..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_image.go +++ /dev/null @@ -1,138 +0,0 @@ -package v1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Image governs policies related to imagestream imports and runtime configuration -// for external registries. It allows cluster admins to configure which registries -// OpenShift is allowed to import images from, extra CA trust bundles for external -// registries, and policies to block or allow registry hostnames. -// When exposing OpenShift's image registry to the public, this also lets cluster -// admins specify the external hostname. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=images,scope=Cluster -// +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type Image struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec ImageSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status ImageStatus `json:"status"` -} - -type ImageSpec struct { - // allowedRegistriesForImport limits the container image registries that normal users may import - // images from. Set this list to the registries that you trust to contain valid Docker - // images and that you want applications to be able to import from. Users with - // permission to create Images or ImageStreamMappings via the API are not affected by - // this policy - typically only administrators or system integrations will have those - // permissions. - // +optional - AllowedRegistriesForImport []RegistryLocation `json:"allowedRegistriesForImport,omitempty"` - - // externalRegistryHostnames provides the hostnames for the default external image - // registry. The external hostname should be set only when the image registry - // is exposed externally. The first value is used in 'publicDockerImageRepository' - // field in ImageStreams. The value must be in "hostname[:port]" format. - // +optional - ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` - - // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that - // should be trusted during imagestream import, pod image pull, build image pull, and - // imageregistry pullthrough. - // The namespace for this config map is openshift-config. 
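For illustration, an ImageSpec combining the fields described here (additionalTrustedCA itself is declared just below); the registry names and the config map name are invented, and the import path is the assumed canonical one.

package example

import configv1 "github.com/openshift/api/config/v1"

// exampleImageSpec trusts imports from quay.io and an insecure internal
// registry, publishes an external registry hostname, and points at a CA
// bundle config map expected in the openshift-config namespace.
func exampleImageSpec() configv1.ImageSpec {
	return configv1.ImageSpec{
		AllowedRegistriesForImport: []configv1.RegistryLocation{
			{DomainName: "quay.io"},
			{DomainName: "registry.internal:5000", Insecure: true},
		},
		ExternalRegistryHostnames: []string{"registry.apps.example.com"},
		AdditionalTrustedCA:       configv1.ConfigMapNameReference{Name: "registry-ca"},
	}
}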
- // +optional - AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"` - - // registrySources contains configuration that determines how the container runtime - // should treat individual registries when accessing images for builds+pods. (e.g. - // whether or not to allow insecure access). It does not contain configuration for the - // internal cluster registry. - // +optional - RegistrySources RegistrySources `json:"registrySources"` -} - -type ImageStatus struct { - // internalRegistryHostname sets the hostname for the default internal image - // registry. The value must be in "hostname[:port]" format. - // This value is set by the image registry operator which controls the internal registry - // hostname. - // +optional - InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"` - - // externalRegistryHostnames provides the hostnames for the default external image - // registry. The external hostname should be set only when the image registry - // is exposed externally. The first value is used in 'publicDockerImageRepository' - // field in ImageStreams. The value must be in "hostname[:port]" format. - // +optional - ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ImageList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []Image `json:"items"` -} - -// RegistryLocation contains a location of the registry specified by the registry domain -// name. The domain name might include wildcards, like '*' or '??'. -type RegistryLocation struct { - // domainName specifies a domain name for the registry - // In case the registry uses a non-standard (80 or 443) port, the port should be included - // in the domain name as well. - DomainName string `json:"domainName"` - // insecure indicates whether the registry is secure (https) or insecure (http). - // By default (if not specified) the registry is assumed as secure. - // +optional - Insecure bool `json:"insecure,omitempty"` -} - -// RegistrySources holds cluster-wide information about how to handle the registries config. -type RegistrySources struct { - // insecureRegistries are registries which do not have a valid TLS certificate or only support HTTP connections. - // +optional - InsecureRegistries []string `json:"insecureRegistries,omitempty"` - // blockedRegistries cannot be used for image pull and push actions. All other registries are permitted. - // - // Only one of BlockedRegistries or AllowedRegistries may be set. - // +optional - BlockedRegistries []string `json:"blockedRegistries,omitempty"` - // allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied. - // - // Only one of BlockedRegistries or AllowedRegistries may be set. - // +optional - AllowedRegistries []string `json:"allowedRegistries,omitempty"` - // containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified - // domains in their pull specs. Registries will be searched in the order provided in the list. - // Note: this search list only works with the container runtime, i.e. CRI-O. It will NOT work with builds or imagestream imports. - // +optional - // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:Format=hostname - // +listType=set - ContainerRuntimeSearchRegistries []string `json:"containerRuntimeSearchRegistries,omitempty"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go deleted file mode 100644 index 74df4027..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go +++ /dev/null @@ -1,101 +0,0 @@ -package v1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. -// When multiple policies are defined, the outcome of the behavior is defined on each field. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/874 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=imagecontentpolicies,scope=Cluster -// +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type ImageContentPolicy struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec ImageContentPolicySpec `json:"spec"` -} - -// ImageContentPolicySpec is the specification of the ImageContentPolicy CRD. -type ImageContentPolicySpec struct { - // repositoryDigestMirrors allows images referenced by image digests in pods to be - // pulled from alternative mirrored repository locations. The image pull specification - // provided to the pod will be compared to the source locations described in RepositoryDigestMirrors - // and the image may be pulled down from any of the mirrors in the list instead of the - // specified repository allowing administrators to choose a potentially faster mirror. - // To pull images from mirrors by tags, set "allowMirrorByTags". - // - // Each “source” repository is treated independently; configurations for different “source” - // repositories don’t interact. - // - // If the "mirrors" is not specified, the image will continue to be pulled from the specified - // repository in the pull spec. - // - // When multiple policies are defined for the same “source” repository, the sets of defined - // mirrors will be merged together, preserving the relative order of the mirrors, if possible. - // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the - // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict - // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
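A sketch of the repositoryDigestMirrors list described above (the field itself is declared just below); source and mirror locations are invented, and the import path is the assumed canonical one.

package example

import configv1 "github.com/openshift/api/config/v1"

// exampleICP mirrors digest-referenced pulls of one source repository and
// additionally allows tag-based pulls from the mirror.
func exampleICP() configv1.ImageContentPolicySpec {
	return configv1.ImageContentPolicySpec{
		RepositoryDigestMirrors: []configv1.RepositoryDigestMirrors{{
			Source:            "registry.example.com/team/app",
			AllowMirrorByTags: true,
			Mirrors:           []configv1.Mirror{"mirror.internal/team/app"},
		}},
	}
}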
- // +optional - // +listType=map - // +listMapKey=source - RepositoryDigestMirrors []RepositoryDigestMirrors `json:"repositoryDigestMirrors"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageContentPolicyList lists the items in the ImageContentPolicy CRD. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ImageContentPolicyList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []ImageContentPolicy `json:"items"` -} - -// RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. -type RepositoryDigestMirrors struct { - // source is the repository that users refer to, e.g. in image pull specifications. - // +required - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$` - Source string `json:"source"` - // allowMirrorByTags: if true, the mirrors can be used to pull the images that are referenced by their tags. The default is false: the mirrors only work when pulling the images that are referenced by their digests. - // Pulling images by tag can potentially yield different images, depending on which endpoint - // we pull from. Forcing digest-pulls for mirrors avoids that issue. - // +optional - AllowMirrorByTags bool `json:"allowMirrorByTags,omitempty"` - // mirrors is zero or more repositories that may also contain the same images. - // If the "mirrors" is not specified, the image will continue to be pulled from the specified - // repository in the pull spec. No mirror will be configured. - // The order of mirrors in this list is treated as the user's desired priority, while source - // is by default considered lower priority than all mirrors. Other cluster configuration, - // including (but not limited to) other repositoryDigestMirrors objects, - // may impact the exact order mirrors are contacted in, or some mirrors may be contacted - // in parallel, so this should be considered a preference rather than a guarantee of ordering. - // +optional - // +listType=set - Mirrors []Mirror `json:"mirrors,omitempty"` -} - -// +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$` -type Mirror string diff --git a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go deleted file mode 100644 index 43d748c0..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go +++ /dev/null @@ -1,143 +0,0 @@ -package v1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules when using digest pull specifications. -// When multiple policies are defined, the outcome of the behavior is defined on each field.
-// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1126 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=imagedigestmirrorsets,scope=Cluster,shortName=idms -// +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type ImageDigestMirrorSet struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec ImageDigestMirrorSetSpec `json:"spec"` - // status contains the observed state of the resource. - // +optional - Status ImageDigestMirrorSetStatus `json:"status,omitempty"` -} - -// ImageDigestMirrorSetSpec is the specification of the ImageDigestMirrorSet CRD. -type ImageDigestMirrorSetSpec struct { - // imageDigestMirrors allows images referenced by image digests in pods to be - // pulled from alternative mirrored repository locations. The image pull specification - // provided to the pod will be compared to the source locations described in imageDigestMirrors - // and the image may be pulled down from any of the mirrors in the list instead of the - // specified repository allowing administrators to choose a potentially faster mirror. - // To use mirrors to pull images using tag specification, users should configure - // a list of mirrors using "ImageTagMirrorSet" CRD. - // - // If the image pull specification matches the repository of "source" in multiple imagedigestmirrorset objects, - // only the objects which define the most specific namespace match will be used. - // For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as - // the "source", only the objects using quay.io/libpod/busybox are going to apply - // for pull specification quay.io/libpod/busybox. - // Each “source” repository is treated independently; configurations for different “source” - // repositories don’t interact. - // - // If the "mirrors" is not specified, the image will continue to be pulled from the specified - // repository in the pull spec. - // - // When multiple policies are defined for the same “source” repository, the sets of defined - // mirrors will be merged together, preserving the relative order of the mirrors, if possible. - // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the - // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict - // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. - // Users who want to use a specific order of mirrors should configure them into one list of mirrors using the expected order. - // +optional - // +listType=atomic - ImageDigestMirrors []ImageDigestMirrors `json:"imageDigestMirrors"` -} - -type ImageDigestMirrorSetStatus struct{} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageDigestMirrorSetList lists the items in the ImageDigestMirrorSet CRD.
-// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ImageDigestMirrorSetList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []ImageDigestMirrorSet `json:"items"` -} - -// +kubebuilder:validation:Pattern=`^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$` -type ImageMirror string - -// MirrorSourcePolicy defines the fallback policy if the image pull from the mirrors fails. -// +kubebuilder:validation:Enum=NeverContactSource;AllowContactingSource -type MirrorSourcePolicy string - -const ( - // NeverContactSource prevents image pull from the specified repository in the pull spec if the image pull from the mirror list fails. - NeverContactSource MirrorSourcePolicy = "NeverContactSource" - - // AllowContactingSource allows falling back to the specified repository in the pull spec if the image pull from the mirror list fails. - AllowContactingSource MirrorSourcePolicy = "AllowContactingSource" -) - -// ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. -type ImageDigestMirrors struct { - // source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname, - // e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry. - // "source" uses one of the following formats: - // host[:port] - // host[:port]/namespace[/namespace…] - // host[:port]/namespace[/namespace…]/repo - // [*.]host - // for more information about the format, see the document about the location field: - // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table - // +required - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$` - Source string `json:"source"` - // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. - // Images can be pulled from these mirrors only if they are referenced by their digests. - // The mirrored location is obtained by replacing the part of the input reference that - // matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, - // a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo - // repository to be used. - // The order of mirrors in this list is treated as the user's desired priority, while source - // is by default considered lower priority than all mirrors.
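The mirrored-location rewrite described a few lines up can be sketched as a plain prefix substitution; this is illustrative only, not the runtime's implementation, and wildcard sources such as *.redhat.io would need extra handling.

package example

import "strings"

// mirroredRef replaces the part of ref that matches source with mirror,
// e.g. ("registry.redhat.io/product/repo", "registry.redhat.io",
// "mirror.local/redhat") yields "mirror.local/redhat/product/repo".
func mirroredRef(ref, source, mirror string) (string, bool) {
	if !strings.HasPrefix(ref, source) {
		return "", false
	}
	return mirror + strings.TrimPrefix(ref, source), true
}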
- // If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be - // pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy". - // Other cluster configuration, including (but not limited to) other imageDigestMirrors objects, - // may impact the exact order mirrors are contacted in, or some mirrors may be contacted - // in parallel, so this should be considered a preference rather than a guarantee of ordering. - // "mirrors" uses one of the following formats: - // host[:port] - // host[:port]/namespace[/namespace…] - // host[:port]/namespace[/namespace…]/repo - // for more information about the format, see the document about the location field: - // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table - // +optional - // +listType=set - Mirrors []ImageMirror `json:"mirrors,omitempty"` - // mirrorSourcePolicy defines the fallback policy if the image pull from the mirrors fails. - // If unset, the image will continue to be pulled from the repository in the pull spec. - // sourcePolicy is valid configuration only when one or more mirrors are in the mirror list. - // +optional - MirrorSourcePolicy MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go deleted file mode 100644 index ca8d3551..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go +++ /dev/null @@ -1,130 +0,0 @@ -package v1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules when using tag pull specifications. -// When multiple policies are defined, the outcome of the behavior is defined on each field. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1126 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=imagetagmirrorsets,scope=Cluster,shortName=itms -// +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type ImageTagMirrorSet struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec ImageTagMirrorSetSpec `json:"spec"` - // status contains the observed state of the resource. - // +optional - Status ImageTagMirrorSetStatus `json:"status,omitempty"` -} - -// ImageTagMirrorSetSpec is the specification of the ImageTagMirrorSet CRD. -type ImageTagMirrorSetSpec struct { - // imageTagMirrors allows images referenced by image tags in pods to be - // pulled from alternative mirrored repository locations.
The image pull specification - // provided to the pod will be compared to the source locations described in imageTagMirrors - // and the image may be pulled down from any of the mirrors in the list instead of the - // specified repository allowing administrators to choose a potentially faster mirror. - // To use mirrors to pull images using digest specification only, users should configure - // a list of mirrors using "ImageDigestMirrorSet" CRD. - // - // If the image pull specification matches the repository of "source" in multiple imagetagmirrorset objects, - // only the objects which define the most specific namespace match will be used. - // For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as - // the "source", only the objects using quay.io/libpod/busybox are going to apply - // for pull specification quay.io/libpod/busybox. - // Each “source” repository is treated independently; configurations for different “source” - // repositories don’t interact. - // - // If the "mirrors" is not specified, the image will continue to be pulled from the specified - // repository in the pull spec. - // - // When multiple policies are defined for the same “source” repository, the sets of defined - // mirrors will be merged together, preserving the relative order of the mirrors, if possible. - // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the - // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict - // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. - // Users who want to use a deterministic order of mirrors should configure them into one list of mirrors using the expected order. - // +optional - // +listType=atomic - ImageTagMirrors []ImageTagMirrors `json:"imageTagMirrors"` -} - -type ImageTagMirrorSetStatus struct{} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageTagMirrorSetList lists the items in the ImageTagMirrorSet CRD. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ImageTagMirrorSetList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []ImageTagMirrorSet `json:"items"` -} - -// ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config. -type ImageTagMirrors struct { - // source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname, - // e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry.
- // "source" uses one of the following formats: - // host[:port] - // host[:port]/namespace[/namespace…] - // host[:port]/namespace[/namespace…]/repo - // [*.]host - // for more information about the format, see the document about the location field: - // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table - // +required - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$` - Source string `json:"source"` - // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. - // Images can be pulled from these mirrors only if they are referenced by their tags. - // The mirrored location is obtained by replacing the part of the input reference that - // matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, - // a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo - // repository to be used. - // Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. - // Configuring a list of mirrors using "ImageDigestMirrorSet" CRD and forcing digest-pulls for mirrors avoids that issue. - // The order of mirrors in this list is treated as the user's desired priority, while source - // is by default considered lower priority than all mirrors. - // If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be - // pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy". - // Other cluster configuration, including (but not limited to) other imageTagMirrors objects, - // may impact the exact order mirrors are contacted in, or some mirrors may be contacted - // in parallel, so this should be considered a preference rather than a guarantee of ordering. - // "mirrors" uses one of the following formats: - // host[:port] - // host[:port]/namespace[/namespace…] - // host[:port]/namespace[/namespace…]/repo - // for more information about the format, see the document about the location field: - // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table - // +optional - // +listType=set - Mirrors []ImageMirror `json:"mirrors,omitempty"` - // mirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors. - // If unset, the image will continue to be pulled from the repository in the pull spec. - // sourcePolicy is valid configuration only when one or more mirrors are in the mirror list. 
- // +optional - MirrorSourcePolicy MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go deleted file mode 100644 index e5ff5fc6..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ /dev/null @@ -1,1887 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status - -// Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster` - // - // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). - // +openshift:compatibility-gen:level=1 - // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 - // +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 - // +kubebuilder:object:root=true - // +kubebuilder:resource:path=infrastructures,scope=Cluster - // +kubebuilder:subresource:status - // +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type Infrastructure struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec InfrastructureSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status InfrastructureStatus `json:"status"` -} - -// InfrastructureSpec contains settings that apply to the cluster infrastructure. -type InfrastructureSpec struct { - // cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. - // This configuration file is used to configure the Kubernetes cloud provider integration - // when using the built-in cloud provider integration or the external cloud controller manager. - // The namespace for this config map is openshift-config. - // - // cloudConfig should only be consumed by the kube_cloud_config controller. - // The controller is responsible for using the user configuration in the spec - // for various platforms and combining that with the user provided ConfigMap in this field - // to create a stitched kube cloud config. - // The controller generates a ConfigMap `kube-cloud-config` in the `openshift-config-managed` namespace, - // with the kube cloud config stored in the `cloud.conf` key. - // All the clients are expected to use the generated ConfigMap only. - // - // +optional - CloudConfig ConfigMapFileReference `json:"cloudConfig"` - - // platformSpec holds desired information specific to the underlying - // infrastructure provider. - PlatformSpec PlatformSpec `json:"platformSpec,omitempty"` -} - -// InfrastructureStatus describes the infrastructure the cluster is leveraging. -type InfrastructureStatus struct { - // infrastructureName uniquely identifies a cluster with a human-friendly name. - // Once set it should not be changed. Must be of max length 27 and must have only - // alphanumeric or hyphen characters.
- InfrastructureName string `json:"infrastructureName"` - - // platform is the underlying infrastructure provider for the cluster. - // - // Deprecated: Use platformStatus.type instead. - Platform PlatformType `json:"platform,omitempty"` - - // platformStatus holds status information specific to the underlying - // infrastructure provider. - // +optional - PlatformStatus *PlatformStatus `json:"platformStatus,omitempty"` - - // etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering - // etcd servers and clients. - // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery - // deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release. - EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"` - - // apiServerURL is a valid URI with scheme 'https', address and - // optionally a port (defaulting to 443). apiServerURL can be used by components like the web console - // to tell users where to find the Kubernetes API. - APIServerURL string `json:"apiServerURL"` - - // apiServerInternalURL is a valid URI with scheme 'https', - // address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components - // like kubelets to contact the Kubernetes API server using the - // infrastructure provider rather than Kubernetes networking. - APIServerInternalURL string `json:"apiServerInternalURI"` - - // controlPlaneTopology expresses the expectations for operands that normally run on control nodes. - // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. - // The 'SingleReplica' mode will be used in single-node deployments - // and the operators should not configure the operand for highly-available operation. - // The 'External' mode indicates that the control plane is hosted externally to the cluster and that - // its components are not visible within the cluster. - // +kubebuilder:default=HighlyAvailable - // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica;External - ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"` - - // infrastructureTopology expresses the expectations for infrastructure services that do not run on control - // plane nodes, usually indicated by a node selector for a `role` value - // other than `master`. - // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. - // The 'SingleReplica' mode will be used in single-node deployments - // and the operators should not configure the operand for highly-available operation. - // NOTE: External topology mode is not applicable for this field. - // +kubebuilder:default=HighlyAvailable - // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica - InfrastructureTopology TopologyMode `json:"infrastructureTopology"` - - // cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. - // CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. - // Valid values are "None" and "AllNodes". When omitted, the default value is "None". - // The default value of "None" indicates that no nodes will be set up with CPU partitioning. - // The "AllNodes" value indicates that all nodes have been set up with CPU partitioning, - // and can then be further configured via the PerformanceProfile API.
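// Illustrative sketch, not part of the vendored file: how an operator might
// consume controlPlaneTopology, as documented above, to size an operand. The
// replica counts are hypothetical choices; assumes the canonical module path
// github.com/openshift/api/config/v1.
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// operandReplicas picks a replica count from the observed topology.
func operandReplicas(infra *configv1.Infrastructure) int32 {
	switch infra.Status.ControlPlaneTopology {
	case configv1.SingleReplicaTopologyMode:
		return 1 // single-node: do not configure for high availability
	case configv1.ExternalTopologyMode:
		return 0 // control plane hosted externally to the cluster
	default:
		return 2 // HighlyAvailable: the "normal" cluster behavior
	}
}

func main() {
	infra := &configv1.Infrastructure{
		Status: configv1.InfrastructureStatus{ControlPlaneTopology: configv1.SingleReplicaTopologyMode},
	}
	fmt.Println(operandReplicas(infra)) // 1
}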
- // +kubebuilder:default=None - // +default="None" - // +kubebuilder:validation:Enum=None;AllNodes - // +optional - CPUPartitioning CPUPartitioningMode `json:"cpuPartitioning,omitempty"` -} - -// TopologyMode defines the topology mode of the control/infra nodes. -// NOTE: Enum validation is specified in each field that uses this type, -// given that External value is not applicable to the InfrastructureTopology -// field. -type TopologyMode string - -const ( - // "HighlyAvailable" is for operators to configure high-availability as much as possible. - HighlyAvailableTopologyMode TopologyMode = "HighlyAvailable" - - // "SingleReplica" is for operators to avoid spending resources for high-availability purposes. - SingleReplicaTopologyMode TopologyMode = "SingleReplica" - - // "External" indicates that the component is running externally to the cluster. When specified - // as the control plane topology, operators should avoid scheduling workloads to masters or assuming - // that any of the control plane components such as the Kubernetes API server or etcd are visible within - // the cluster. - ExternalTopologyMode TopologyMode = "External" -) - -// CPUPartitioningMode defines the mode for CPU partitioning -type CPUPartitioningMode string - -const ( - // CPUPartitioningNone means that no CPU Partitioning is enabled in this cluster infrastructure - CPUPartitioningNone CPUPartitioningMode = "None" - - // CPUPartitioningAllNodes means that all nodes are configured with CPU Partitioning in this cluster - CPUPartitioningAllNodes CPUPartitioningMode = "AllNodes" -) - -// PlatformLoadBalancerType defines the type of load balancer used by the cluster. -type PlatformLoadBalancerType string - -const ( - // LoadBalancerTypeUserManaged is a load balancer with control-plane VIPs managed outside of the cluster by the customer. - LoadBalancerTypeUserManaged PlatformLoadBalancerType = "UserManaged" - - // LoadBalancerTypeOpenShiftManagedDefault is the default load balancer with control-plane VIPs managed by the OpenShift cluster. - LoadBalancerTypeOpenShiftManagedDefault PlatformLoadBalancerType = "OpenShiftManagedDefault" -) - -// PlatformType is a specific supported infrastructure provider. -// +kubebuilder:validation:Enum="";AWS;Azure;BareMetal;GCP;Libvirt;OpenStack;None;VSphere;oVirt;IBMCloud;KubeVirt;EquinixMetal;PowerVS;AlibabaCloud;Nutanix;External -type PlatformType string - -const ( - // AWSPlatformType represents Amazon Web Services infrastructure. - AWSPlatformType PlatformType = "AWS" - - // AzurePlatformType represents Microsoft Azure infrastructure. - AzurePlatformType PlatformType = "Azure" - - // BareMetalPlatformType represents managed bare metal infrastructure. - BareMetalPlatformType PlatformType = "BareMetal" - - // GCPPlatformType represents Google Cloud Platform infrastructure. - GCPPlatformType PlatformType = "GCP" - - // LibvirtPlatformType represents libvirt infrastructure. - LibvirtPlatformType PlatformType = "Libvirt" - - // OpenStackPlatformType represents OpenStack infrastructure. - OpenStackPlatformType PlatformType = "OpenStack" - - // NonePlatformType means there is no infrastructure provider. - NonePlatformType PlatformType = "None" - - // VSpherePlatformType represents VMware vSphere infrastructure. - VSpherePlatformType PlatformType = "VSphere" - - // OvirtPlatformType represents oVirt/RHV infrastructure. - OvirtPlatformType PlatformType = "oVirt" - - // IBMCloudPlatformType represents IBM Cloud infrastructure.
- IBMCloudPlatformType PlatformType = "IBMCloud" - - // KubevirtPlatformType represents KubeVirt/OpenShift Virtualization infrastructure. - KubevirtPlatformType PlatformType = "KubeVirt" - - // EquinixMetalPlatformType represents Equinix Metal infrastructure. - EquinixMetalPlatformType PlatformType = "EquinixMetal" - - // PowerVSPlatformType represents IBM Power Systems Virtual Servers infrastructure. - PowerVSPlatformType PlatformType = "PowerVS" - - // AlibabaCloudPlatformType represents Alibaba Cloud infrastructure. - AlibabaCloudPlatformType PlatformType = "AlibabaCloud" - - // NutanixPlatformType represents Nutanix infrastructure. - NutanixPlatformType PlatformType = "Nutanix" - - // ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately. - ExternalPlatformType PlatformType = "External" -) - -// IBMCloudProviderType is a specific supported IBM Cloud provider cluster type -type IBMCloudProviderType string - -const ( - // Classic means that the IBM Cloud cluster is using classic infrastructure - IBMCloudProviderTypeClassic IBMCloudProviderType = "Classic" - - // VPC means that the IBM Cloud cluster is using VPC infrastructure - IBMCloudProviderTypeVPC IBMCloudProviderType = "VPC" - - // IBMCloudProviderTypeUPI means that the IBM Cloud cluster is using user-provided infrastructure. - // This is utilized in IBM Cloud Satellite environments. - IBMCloudProviderTypeUPI IBMCloudProviderType = "UPI" -) - -// DNSType indicates whether the cluster DNS is hosted by the cluster or Core DNS. -type DNSType string - -const ( - // ClusterHosted indicates that a DNS solution other than the default provided by the - // cloud platform is in use. In this mode, the cluster hosts a DNS solution during installation and the - // user is expected to provide their own DNS solution post-install. - // When the DNS solution is `ClusterHosted`, the cluster will continue to use the - // default Load Balancers provided by the cloud platform. - ClusterHostedDNSType DNSType = "ClusterHosted" - - // PlatformDefault indicates that the cluster is using the default DNS solution for the - // cloud platform. OpenShift is responsible for all the LB and DNS configuration needed for the - // cluster to be functional with no intervention from the user. To accomplish this, OpenShift - // configures the default LB and DNS solutions provided by the underlying cloud. - PlatformDefaultDNSType DNSType = "PlatformDefault" -) - -// ExternalPlatformSpec holds the desired state for the generic External infrastructure provider. -type ExternalPlatformSpec struct { - // PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at installation time. - // This field is solely for informational and reporting purposes and is not expected to be used for decision-making. - // +kubebuilder:default:="Unknown" - // +default="Unknown" - // +kubebuilder:validation:XValidation:rule="oldSelf == 'Unknown' || self == oldSelf",message="platform name cannot be changed once set" - // +optional - PlatformName string `json:"platformName,omitempty"` -} - -// PlatformSpec holds the desired state specific to the underlying infrastructure provider -// of the current cluster. Since these are used at spec-level for the underlying cluster, it -// is supposed that only one of the spec structs is set. -type PlatformSpec struct { - // type is the underlying infrastructure provider for the cluster.
This - // value controls whether infrastructure automation such as service load - // balancers, dynamic volume provisioning, machine creation and deletion, and - // other integrations are enabled. If None, no infrastructure automation is - // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", - // "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", - // "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, - // and must handle unrecognized platforms as None if they do not support that platform. - // - // +unionDiscriminator - Type PlatformType `json:"type"` - - // AWS contains settings specific to the Amazon Web Services infrastructure provider. - // +optional - AWS *AWSPlatformSpec `json:"aws,omitempty"` - - // Azure contains settings specific to the Azure infrastructure provider. - // +optional - Azure *AzurePlatformSpec `json:"azure,omitempty"` - - // GCP contains settings specific to the Google Cloud Platform infrastructure provider. - // +optional - GCP *GCPPlatformSpec `json:"gcp,omitempty"` - - // BareMetal contains settings specific to the BareMetal platform. - // +optional - BareMetal *BareMetalPlatformSpec `json:"baremetal,omitempty"` - - // OpenStack contains settings specific to the OpenStack infrastructure provider. - // +optional - OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"` - - // Ovirt contains settings specific to the oVirt infrastructure provider. - // +optional - Ovirt *OvirtPlatformSpec `json:"ovirt,omitempty"` - - // VSphere contains settings specific to the VSphere infrastructure provider. - // +optional - VSphere *VSpherePlatformSpec `json:"vsphere,omitempty"` - - // IBMCloud contains settings specific to the IBMCloud infrastructure provider. - // +optional - IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` - - // Kubevirt contains settings specific to the kubevirt infrastructure provider. - // +optional - Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"` - - // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. - // +optional - EquinixMetal *EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"` - - // PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. - // +optional - PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"` - - // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. - // +optional - AlibabaCloud *AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"` - - // Nutanix contains settings specific to the Nutanix infrastructure provider. - // +optional - Nutanix *NutanixPlatformSpec `json:"nutanix,omitempty"` - - // ExternalPlatformType represents generic infrastructure provider. - // Platform-specific components should be supplemented separately. - // +optional - External *ExternalPlatformSpec `json:"external,omitempty"` -} - -// CloudControllerManagerState defines whether Cloud Controller Manager presence is expected or not -type CloudControllerManagerState string - -const ( - // Cloud Controller Manager is enabled and expected to be installed. - // This value indicates that new nodes should be tainted as uninitialized when created, - // preventing them from running workloads until they are initialized by the cloud controller manager. - CloudControllerManagerExternal CloudControllerManagerState = "External" - - // Cloud Controller Manager is disabled and not expected to be installed. 
- // This value indicates that new nodes should not be tainted - // and no extra node initialization is expected from the cloud controller manager. - CloudControllerManagerNone CloudControllerManagerState = "None" -) - -// CloudControllerManagerStatus holds the state of Cloud Controller Manager (a.k.a. CCM or CPI) related settings -// +kubebuilder:validation:XValidation:rule="(has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) && self.state != \"External\")",message="state may not be added or removed once set" -type CloudControllerManagerStatus struct { - // state determines whether or not an external Cloud Controller Manager is expected to - // be installed within the cluster. - // https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager - // - // Valid values are "External", "None" and omitted. - // When set to "External", new nodes will be tainted as uninitialized when created, - // preventing them from running workloads until they are initialized by the cloud controller manager. - // When omitted or set to "None", new nodes will not be tainted - // and no extra initialization from the cloud controller manager is expected. - // +kubebuilder:validation:Enum="";External;None - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="state is immutable once set" - // +optional - State CloudControllerManagerState `json:"state"` -} - -// ExternalPlatformStatus holds the current status of the generic External infrastructure provider. -// +kubebuilder:validation:XValidation:rule="has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager)",message="cloudControllerManager may not be added or removed once set" -type ExternalPlatformStatus struct { - // cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). - // When omitted, new nodes will not be tainted - // and no extra initialization from the cloud controller manager is expected. - // +optional - CloudControllerManager CloudControllerManagerStatus `json:"cloudControllerManager"` -} - -// PlatformStatus holds the current status specific to the underlying infrastructure provider -// of the current cluster. Since these are used at status-level for the underlying cluster, it -// is supposed that only one of the status structs is set. -type PlatformStatus struct { - // type is the underlying infrastructure provider for the cluster. This - // value controls whether infrastructure automation such as service load - // balancers, dynamic volume provisioning, machine creation and deletion, and - // other integrations are enabled. If None, no infrastructure automation is - // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", - // "OpenStack", "VSphere", "oVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and "None". - // Individual components may not support all platforms, and must handle - // unrecognized platforms as None if they do not support that platform. - // - // This value will be synced to the `status.platform` and `status.platformStatus.type`. - // Currently this value cannot be changed once set. - Type PlatformType `json:"type"` - - // AWS contains settings specific to the Amazon Web Services infrastructure provider. - // +optional - AWS *AWSPlatformStatus `json:"aws,omitempty"` - - // Azure contains settings specific to the Azure infrastructure provider.
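// Illustrative sketch, not part of the vendored file: reading the external
// platform's Cloud Controller Manager state, described above, to decide
// whether new nodes should start with the uninitialized taint. The helper is
// hypothetical; assumes the canonical module path github.com/openshift/api/config/v1.
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// expectUninitializedTaint reports whether an external CCM is expected to
// initialize new nodes, i.e. whether they should be created tainted.
func expectUninitializedTaint(status *configv1.PlatformStatus) bool {
	if status == nil || status.External == nil {
		return false
	}
	return status.External.CloudControllerManager.State == configv1.CloudControllerManagerExternal
}

func main() {
	s := &configv1.PlatformStatus{
		Type: configv1.ExternalPlatformType,
		External: &configv1.ExternalPlatformStatus{
			CloudControllerManager: configv1.CloudControllerManagerStatus{
				State: configv1.CloudControllerManagerExternal,
			},
		},
	}
	fmt.Println(expectUninitializedTaint(s)) // true
}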
- // +optional - Azure *AzurePlatformStatus `json:"azure,omitempty"` - - // GCP contains settings specific to the Google Cloud Platform infrastructure provider. - // +optional - GCP *GCPPlatformStatus `json:"gcp,omitempty"` - - // BareMetal contains settings specific to the BareMetal platform. - // +optional - BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"` - - // OpenStack contains settings specific to the OpenStack infrastructure provider. - // +optional - OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"` - - // Ovirt contains settings specific to the oVirt infrastructure provider. - // +optional - Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"` - - // VSphere contains settings specific to the VSphere infrastructure provider. - // +optional - VSphere *VSpherePlatformStatus `json:"vsphere,omitempty"` - - // IBMCloud contains settings specific to the IBMCloud infrastructure provider. - // +optional - IBMCloud *IBMCloudPlatformStatus `json:"ibmcloud,omitempty"` - - // Kubevirt contains settings specific to the kubevirt infrastructure provider. - // +optional - Kubevirt *KubevirtPlatformStatus `json:"kubevirt,omitempty"` - - // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. - // +optional - EquinixMetal *EquinixMetalPlatformStatus `json:"equinixMetal,omitempty"` - - // PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. - // +optional - PowerVS *PowerVSPlatformStatus `json:"powervs,omitempty"` - - // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. - // +optional - AlibabaCloud *AlibabaCloudPlatformStatus `json:"alibabaCloud,omitempty"` - - // Nutanix contains settings specific to the Nutanix infrastructure provider. - // +optional - Nutanix *NutanixPlatformStatus `json:"nutanix,omitempty"` - - // External contains settings specific to the generic External infrastructure provider. - // +optional - External *ExternalPlatformStatus `json:"external,omitempty"` -} - -// AWSServiceEndpoint stores the configuration of a custom URL to -// override existing defaults of AWS Services. -type AWSServiceEndpoint struct { - // name is the name of the AWS service. - // The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - // This must be provided and cannot be empty. - // - // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$` - Name string `json:"name"` - - // url is a fully qualified URI with scheme https that overrides the default generated - // endpoint for a client. - // This must be provided and cannot be empty. - // - // +kubebuilder:validation:Pattern=`^https://` - URL string `json:"url"` -} - -// AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider. -// This only includes fields that can be modified in the cluster. -type AWSPlatformSpec struct { - // serviceEndpoints list contains custom endpoints which will override default - // service endpoint of AWS Services. - // There must be only one ServiceEndpoint for a service. - // +listType=atomic - // +optional - ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"` -} - -// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider. -type AWSPlatformStatus struct { - // region holds the default AWS region for new AWS resources created by the cluster.
- Region string `json:"region"` - - // ServiceEndpoints list contains custom endpoints which will override default - // service endpoint of AWS Services. - // There must be only one ServiceEndpoint for a service. - // +listType=atomic - // +optional - ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"` - - // resourceTags is a list of additional tags to apply to AWS resources created for the cluster. - // See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. - // AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags - // available for the user. - // +kubebuilder:validation:MaxItems=25 - // +listType=atomic - // +optional - ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` -} - -// AWSResourceTag is a tag to apply to AWS resources created for the cluster. -type AWSResourceTag struct { - // key is the key of the tag. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=128 - // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` - // +required - Key string `json:"key"` - // value is the value of the tag. - // Some AWS services do not support empty values. Since tags are added to resources in many services, the - // length of the tag value must meet the requirements of all services. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` - // +required - Value string `json:"value"` -} - -// AzurePlatformSpec holds the desired state of the Azure infrastructure provider. -// This only includes fields that can be modified in the cluster. -type AzurePlatformSpec struct{} - -// AzurePlatformStatus holds the current status of the Azure infrastructure provider. -// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation" -type AzurePlatformStatus struct { - // resourceGroupName is the Resource Group for new Azure resources created for the cluster. - ResourceGroupName string `json:"resourceGroupName"` - - // networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. - // If empty, the value is the same as ResourceGroupName. - // +optional - NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"` - - // cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK - // with the appropriate Azure API endpoints. - // If empty, the value is equal to `AzurePublicCloud`. - // +optional - CloudName AzureCloudEnvironment `json:"cloudName,omitempty"` - - // armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack. - // +optional - ARMEndpoint string `json:"armEndpoint,omitempty"` - - // resourceTags is a list of additional tags to apply to Azure resources created for the cluster. - // See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. - // Due to limitations on Automation, Content Delivery Network, and DNS Azure resources, a maximum of 15 tags - // may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.
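// Illustrative sketch, not part of the vendored file: checking a candidate
// Azure tag key against the kubebuilder pattern documented just below, e.g.
// before submitting it to the cluster. The sample keys are hypothetical.
package main

import (
	"fmt"
	"regexp"
)

// azureTagKey mirrors the Pattern marker on AzureResourceTag.Key.
var azureTagKey = regexp.MustCompile(`^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$`)

func main() {
	for _, key := range []string{"team", "cost-center_1", "9starts-with-digit"} {
		fmt.Printf("%q valid: %v\n", key, azureTagKey.MatchString(key))
	}
	// "team" true, "cost-center_1" true, "9starts-with-digit" false
}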
- // +kubebuilder:validation:MaxItems=10 - // +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceTags are immutable and may only be configured during installation" - // +listType=atomic - // +optional - ResourceTags []AzureResourceTag `json:"resourceTags,omitempty"` -} - -// AzureResourceTag is a tag to apply to Azure resources created for the cluster. -type AzureResourceTag struct { - // key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key - // must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric - // characters and the following special characters `_ . -`. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=128 - // +kubebuilder:validation:Pattern=`^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$` - Key string `json:"key"` - // value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value - // must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.=+-@]+$` - Value string `json:"value"` -} - -// AzureCloudEnvironment is the name of the Azure cloud environment -// +kubebuilder:validation:Enum="";AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud;AzureStackCloud -type AzureCloudEnvironment string - -const ( - // AzurePublicCloud is the general-purpose, public Azure cloud environment. - AzurePublicCloud AzureCloudEnvironment = "AzurePublicCloud" - - // AzureUSGovernmentCloud is the Azure cloud environment for the US government. - AzureUSGovernmentCloud AzureCloudEnvironment = "AzureUSGovernmentCloud" - - // AzureChinaCloud is the Azure cloud environment used in China. - AzureChinaCloud AzureCloudEnvironment = "AzureChinaCloud" - - // AzureGermanCloud is the Azure cloud environment used in Germany. - AzureGermanCloud AzureCloudEnvironment = "AzureGermanCloud" - - // AzureStackCloud is the Azure cloud environment used at the edge and on premises. - AzureStackCloud AzureCloudEnvironment = "AzureStackCloud" -) - -// GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. -// This only includes fields that can be modified in the cluster. -type GCPPlatformSpec struct{} - -// GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider. -// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation" -// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation" -type GCPPlatformStatus struct { - // projectID is the Project ID for new GCP resources created for the cluster. - ProjectID string `json:"projectID"` - - // region holds the region for new GCP resources created for the cluster.
- Region string `json:"region"` - - // resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. - // See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. - // GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, - // allowing 32 labels for user configuration. - // +kubebuilder:validation:MaxItems=32 - // +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceLabels are immutable and may only be configured during installation" - // +listType=map - // +listMapKey=key - // +optional - // +openshift:enable:FeatureGate=GCPLabelsTags - ResourceLabels []GCPResourceLabel `json:"resourceLabels,omitempty"` - - // resourceTags is a list of additional tags to apply to GCP resources created for the cluster. - // See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on - // tagging GCP resources. GCP supports a maximum of 50 tags per resource. - // +kubebuilder:validation:MaxItems=50 - // +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceTags are immutable and may only be configured during installation" - // +listType=map - // +listMapKey=key - // +optional - // +openshift:enable:FeatureGate=GCPLabelsTags - ResourceTags []GCPResourceTag `json:"resourceTags,omitempty"` - - // This field was introduced and removed under tech preview. - // To avoid conflicts with serialisation, this field name may never be used again. - // Tombstone the field as a reminder. - // ClusterHostedDNS ClusterHostedDNS `json:"clusterHostedDNS,omitempty"` - - // cloudLoadBalancerConfig is a union that contains the IP addresses of API, - // API-Int and Ingress Load Balancers created on the cloud platform. These - // values would not be populated on on-prem platforms. These Load Balancer - // IPs are used to configure the in-cluster DNS instances for API, API-Int - // and Ingress services. `dnsType` is expected to be set to `ClusterHosted` - // when these Load Balancer IP addresses are populated and used. - // - // +default={"dnsType": "PlatformDefault"} - // +kubebuilder:default={"dnsType": "PlatformDefault"} - // +openshift:enable:FeatureGate=GCPClusterHostedDNS - // +optional - // +nullable - CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` -} - -// GCPResourceLabel is a label to apply to GCP resources created for the cluster. -type GCPResourceLabel struct { - // key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. - // Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, - // and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` - // and `openshift-io`. - // +kubebuilder:validation:XValidation:rule="!self.startsWith('openshift-io') && !self.startsWith('kubernetes-io')",message="label keys must not start with either `openshift-io` or `kubernetes-io`" - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]{0,62}$` - Key string `json:"key"` - - // value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. 
- // Value must contain only lowercase letters, numeric characters, and the following special characters `_-`. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Pattern=`^[0-9a-z_-]{1,63}$` - Value string `json:"value"` -} - -// GCPResourceTag is a tag to apply to GCP resources created for the cluster. -type GCPResourceTag struct { - // parentID is the ID of the hierarchical resource where the tags are defined, - // e.g. at the Organization or the Project level. To find the Organization or Project ID refer to the following pages: - // https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, - // https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. - // An OrganizationID must consist of decimal numbers, and cannot have leading zeroes. - // A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, - // and hyphens, must start with a letter, and cannot end with a hyphen. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=32 - // +kubebuilder:validation:Pattern=`(^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$)` - ParentID string `json:"parentID"` - - // key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. - // Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase - // alphanumeric characters, and the following special characters `._-`. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$` - Key string `json:"key"` - - // value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. - // Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase - // alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$` - Value string `json:"value"` -} - -// CloudLoadBalancerConfig contains a union discriminator indicating the type of DNS -// solution in use within the cluster. When the DNSType is `ClusterHosted`, the cloud's -// Load Balancer configuration needs to be provided so that the DNS solution hosted -// within the cluster can be configured with those values. -// +kubebuilder:validation:XValidation:rule="has(self.dnsType) && self.dnsType != 'ClusterHosted' ? !has(self.clusterHosted) : true",message="clusterHosted is permitted only when dnsType is ClusterHosted" -// +union -type CloudLoadBalancerConfig struct { - // dnsType indicates the type of DNS solution in use within the cluster. Its default value of - // `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform. - // It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode, - // the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. - // The cluster's use of the cloud's Load Balancers is unaffected by this setting.
- // The value is immutable after it has been set at install time. - // Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. - // Enabling this functionality allows the user to start their own DNS solution outside the cluster after - // installation is complete. The customer would be responsible for configuring this custom DNS solution, - // and it can be run in addition to the in-cluster DNS solution. - // +default="PlatformDefault" - // +kubebuilder:default:="PlatformDefault" - // +kubebuilder:validation:Enum="ClusterHosted";"PlatformDefault" - // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="dnsType is immutable" - // +optional - // +unionDiscriminator - DNSType DNSType `json:"dnsType,omitempty"` - - // clusterHosted holds the IP addresses of API, API-Int and Ingress Load - // Balancers on Cloud Platforms. The DNS solution hosted within the cluster - // uses these IP addresses to provide resolution for API, API-Int and Ingress - // services. - // +optional - // +unionMember,optional - ClusterHosted *CloudLoadBalancerIPs `json:"clusterHosted,omitempty"` -} - -// CloudLoadBalancerIPs contains the Load Balancer IPs for the cloud's API, -// API-Int and Ingress Load balancers. They will be populated as soon as the -// respective Load Balancers have been configured. These values are utilized -// to configure the DNS solution hosted within the cluster. -type CloudLoadBalancerIPs struct { - // apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service. - // These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. - // Entries in the apiIntLoadBalancerIPs must be unique. - // A maximum of 16 IP addresses are permitted. - // +kubebuilder:validation:Format=ip - // +listType=set - // +kubebuilder:validation:MaxItems=16 - // +optional - APIIntLoadBalancerIPs []IP `json:"apiIntLoadBalancerIPs,omitempty"` - - // apiLoadBalancerIPs holds Load Balancer IPs for the API service. - // These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. - // Could be empty for private clusters. - // Entries in the apiLoadBalancerIPs must be unique. - // A maximum of 16 IP addresses are permitted. - // +kubebuilder:validation:Format=ip - // +listType=set - // +kubebuilder:validation:MaxItems=16 - // +optional - APILoadBalancerIPs []IP `json:"apiLoadBalancerIPs,omitempty"` - - // ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. - // These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. - // Entries in the ingressLoadBalancerIPs must be unique. - // A maximum of 16 IP addresses are permitted. - // +kubebuilder:validation:Format=ip - // +listType=set - // +kubebuilder:validation:MaxItems=16 - // +optional - IngressLoadBalancerIPs []IP `json:"ingressLoadBalancerIPs,omitempty"` -} - -// BareMetalPlatformLoadBalancer defines the load balancer used by the cluster on BareMetal platform. -// +union -type BareMetalPlatformLoadBalancer struct { - // type defines the type of load balancer used by the cluster on BareMetal platform - // which can be a user-managed or openshift-managed load balancer - // that is to be used for the OpenShift API and Ingress endpoints. - // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing - // defined in the machine config operator will be deployed.
- // When set to UserManaged these static pods will not be deployed and it is expected that - // the load balancer is configured out of band by the deployer. - // When omitted, this means no opinion and the platform is left to choose a reasonable default. - // The default value is OpenShiftManagedDefault. - // +default="OpenShiftManagedDefault" - // +kubebuilder:default:="OpenShiftManagedDefault" - // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged" - // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set" - // +optional - // +unionDiscriminator - Type PlatformLoadBalancerType `json:"type,omitempty"` -} - -// BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. -// This only includes fields that can be modified in the cluster. -// +kubebuilder:validation:XValidation:rule="!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)",message="apiServerInternalIPs list is required once set" -// +kubebuilder:validation:XValidation:rule="!has(oldSelf.ingressIPs) || has(self.ingressIPs)",message="ingressIPs list is required once set" -type BareMetalPlatformSpec struct { - // apiServerInternalIPs are the IP addresses to contact the Kubernetes API - // server that can be used by components inside the cluster, like kubelets - // using the infrastructure rather than Kubernetes networking. These are the - // IPs for a self-hosted load balancer in front of the API servers. - // In dual stack clusters this list contains two IP addresses, one from IPv4 - // family and one from IPv6. - // In single stack clusters a single IP address is expected. - // When omitted, values from the status.apiServerInternalIPs will be used. - // Once set, the list cannot be completely removed (but its second entry can). - // - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=atomic - // +optional - APIServerInternalIPs []IP `json:"apiServerInternalIPs"` - - // ingressIPs are the external IPs which route to the default ingress - // controller. The IPs are suitable targets of a wildcard DNS record used to - // resolve default route host names. - // In dual stack clusters this list contains two IP addresses, one from IPv4 - // family and one from IPv6. - // In single stack clusters a single IP address is expected. - // When omitted, values from the status.ingressIPs will be used. - // Once set, the list cannot be completely removed (but its second entry can). - // - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=atomic - // +optional - IngressIPs []IP `json:"ingressIPs"` - - // machineNetworks are IP networks used to connect all the OpenShift cluster - // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, - // for example "10.0.0.0/8" or "fd00::/8". 
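// Illustrative sketch, not part of the vendored file: the CEL rules above
// allow at most one IPv4 and one IPv6 entry per VIP list; this standalone
// check mirrors ip(self[0]).family() != ip(self[1]).family() using net/netip.
package main

import (
	"fmt"
	"net/netip"
)

// validVIPList mirrors the dual-stack constraint on apiServerInternalIPs and
// ingressIPs: one address, or one address of each IP family.
func validVIPList(ips []string) bool {
	switch len(ips) {
	case 1:
		_, err := netip.ParseAddr(ips[0])
		return err == nil
	case 2:
		a, errA := netip.ParseAddr(ips[0])
		b, errB := netip.ParseAddr(ips[1])
		return errA == nil && errB == nil && a.Is4() != b.Is4()
	default:
		return false // the lists are capped at two entries
	}
}

func main() {
	fmt.Println(validVIPList([]string{"10.0.0.5", "fd00::5"}))  // true (dual stack)
	fmt.Println(validVIPList([]string{"10.0.0.5", "10.0.0.6"})) // false (same family)
}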
- // +listType=atomic - // +kubebuilder:validation:MaxItems=32 - // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" - // +optional - MachineNetworks []CIDR `json:"machineNetworks"` -} - -// BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. -// For more information about the network architecture used with the BareMetal platform type, see: -// https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md -type BareMetalPlatformStatus struct { - // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used - // by components inside the cluster, like kubelets using the infrastructure rather - // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI - // points to. It is the IP for a self-hosted load balancer in front of the API servers. - // - // Deprecated: Use APIServerInternalIPs instead. - APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` - - // apiServerInternalIPs are the IP addresses to contact the Kubernetes API - // server that can be used by components inside the cluster, like kubelets - // using the infrastructure rather than Kubernetes networking. These are the - // IPs for a self-hosted load balancer in front of the API servers. In dual - // stack clusters this list contains two IPs otherwise only one. - // - // +kubebuilder:validation:Format=ip - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=atomic - APIServerInternalIPs []string `json:"apiServerInternalIPs"` - - // ingressIP is an external IP which routes to the default ingress controller. - // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. - // - // Deprecated: Use IngressIPs instead. - IngressIP string `json:"ingressIP,omitempty"` - - // ingressIPs are the external IPs which route to the default ingress - // controller. The IPs are suitable targets of a wildcard DNS record used to - // resolve default route host names. In dual stack clusters this list - // contains two IPs otherwise only one. - // - // +kubebuilder:validation:Format=ip - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=atomic - IngressIPs []string `json:"ingressIPs"` - - // nodeDNSIP is the IP address for the internal DNS used by the - // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` - // provides name resolution for the nodes themselves. There is no DNS-as-a-service for - // BareMetal deployments. In order to minimize necessary changes to the - // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames - // to the nodes in the cluster. - NodeDNSIP string `json:"nodeDNSIP,omitempty"` - - // loadBalancer defines how the load balancer used by the cluster is configured. 
- // +default={"type": "OpenShiftManagedDefault"} - // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureGate=BareMetalLoadBalancer - // +optional - LoadBalancer *BareMetalPlatformLoadBalancer `json:"loadBalancer,omitempty"` - - // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. - // +listType=atomic - // +kubebuilder:validation:MaxItems=32 - // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" - // +optional - MachineNetworks []CIDR `json:"machineNetworks"` -} - -// OpenStackPlatformLoadBalancer defines the load balancer used by the cluster on OpenStack platform. -// +union -type OpenStackPlatformLoadBalancer struct { - // type defines the type of load balancer used by the cluster on OpenStack platform - // which can be a user-managed or openshift-managed load balancer - // that is to be used for the OpenShift API and Ingress endpoints. - // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing - // defined in the machine config operator will be deployed. - // When set to UserManaged these static pods will not be deployed and it is expected that - // the load balancer is configured out of band by the deployer. - // When omitted, this means no opinion and the platform is left to choose a reasonable default. - // The default value is OpenShiftManagedDefault. - // +default="OpenShiftManagedDefault" - // +kubebuilder:default:="OpenShiftManagedDefault" - // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged" - // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set" - // +optional - // +unionDiscriminator - Type PlatformLoadBalancerType `json:"type,omitempty"` -} - -// OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. -// This only includes fields that can be modified in the cluster. -// +kubebuilder:validation:XValidation:rule="!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)",message="apiServerInternalIPs list is required once set" -// +kubebuilder:validation:XValidation:rule="!has(oldSelf.ingressIPs) || has(self.ingressIPs)",message="ingressIPs list is required once set" -type OpenStackPlatformSpec struct { - // apiServerInternalIPs are the IP addresses to contact the Kubernetes API - // server that can be used by components inside the cluster, like kubelets - // using the infrastructure rather than Kubernetes networking. These are the - // IPs for a self-hosted load balancer in front of the API servers. - // In dual stack clusters this list contains two IP addresses, one from IPv4 - // family and one from IPv6. - // In single stack clusters a single IP address is expected. - // When omitted, values from the status.apiServerInternalIPs will be used. - // Once set, the list cannot be completely removed (but its second entry can). - // - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=atomic - // +optional - APIServerInternalIPs []IP `json:"apiServerInternalIPs"` - - // ingressIPs are the external IPs which route to the default ingress - // controller. The IPs are suitable targets of a wildcard DNS record used to - // resolve default route host names. 
- // In dual stack clusters this list contains two IP addresses, one from IPv4 - // family and one from IPv6. - // In single stack clusters a single IP address is expected. - // When omitted, values from the status.ingressIPs will be used. - // Once set, the list cannot be completely removed (but its second entry can). - // - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=atomic - // +optional - IngressIPs []IP `json:"ingressIPs"` - - // machineNetworks are IP networks used to connect all the OpenShift cluster - // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, - // for example "10.0.0.0/8" or "fd00::/8". - // +listType=atomic - // +kubebuilder:validation:MaxItems=32 - // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" - // +optional - MachineNetworks []CIDR `json:"machineNetworks"` -} - -// OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider. -type OpenStackPlatformStatus struct { - // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used - // by components inside the cluster, like kubelets using the infrastructure rather - // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI - // points to. It is the IP for a self-hosted load balancer in front of the API servers. - // - // Deprecated: Use APIServerInternalIPs instead. - APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` - - // apiServerInternalIPs are the IP addresses to contact the Kubernetes API - // server that can be used by components inside the cluster, like kubelets - // using the infrastructure rather than Kubernetes networking. These are the - // IPs for a self-hosted load balancer in front of the API servers. In dual - // stack clusters this list contains two IPs otherwise only one. - // - // +kubebuilder:validation:Format=ip - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=atomic - APIServerInternalIPs []string `json:"apiServerInternalIPs"` - - // cloudName is the name of the desired OpenStack cloud in the - // client configuration file (`clouds.yaml`). - CloudName string `json:"cloudName,omitempty"` - - // ingressIP is an external IP which routes to the default ingress controller. - // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. - // - // Deprecated: Use IngressIPs instead. - IngressIP string `json:"ingressIP,omitempty"` - - // ingressIPs are the external IPs which route to the default ingress - // controller. The IPs are suitable targets of a wildcard DNS record used to - // resolve default route host names. In dual stack clusters this list - // contains two IPs otherwise only one. - // - // +kubebuilder:validation:Format=ip - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? 
ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=atomic - IngressIPs []string `json:"ingressIPs"` - - // nodeDNSIP is the IP address for the internal DNS used by the - // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` - // provides name resolution for the nodes themselves. There is no DNS-as-a-service for - // OpenStack deployments. In order to minimize necessary changes to the - // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames - // to the nodes in the cluster. - NodeDNSIP string `json:"nodeDNSIP,omitempty"` - - // loadBalancer defines how the load balancer used by the cluster is configured. - // +default={"type": "OpenShiftManagedDefault"} - // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +optional - LoadBalancer *OpenStackPlatformLoadBalancer `json:"loadBalancer,omitempty"` - - // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. - // +listType=atomic - // +kubebuilder:validation:MaxItems=32 - // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" - // +optional - MachineNetworks []CIDR `json:"machineNetworks"` -} - -// OvirtPlatformLoadBalancer defines the load balancer used by the cluster on Ovirt platform. -// +union -type OvirtPlatformLoadBalancer struct { - // type defines the type of load balancer used by the cluster on Ovirt platform - // which can be a user-managed or openshift-managed load balancer - // that is to be used for the OpenShift API and Ingress endpoints. - // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing - // defined in the machine config operator will be deployed. - // When set to UserManaged these static pods will not be deployed and it is expected that - // the load balancer is configured out of band by the deployer. - // When omitted, this means no opinion and the platform is left to choose a reasonable default. - // The default value is OpenShiftManagedDefault. - // +default="OpenShiftManagedDefault" - // +kubebuilder:default:="OpenShiftManagedDefault" - // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged" - // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set" - // +optional - // +unionDiscriminator - Type PlatformLoadBalancerType `json:"type,omitempty"` -} - -// OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. -// This only includes fields that can be modified in the cluster. -type OvirtPlatformSpec struct{} - -// OvirtPlatformStatus holds the current status of the oVirt infrastructure provider. -type OvirtPlatformStatus struct { - // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used - // by components inside the cluster, like kubelets using the infrastructure rather - // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI - // points to. It is the IP for a self-hosted load balancer in front of the API servers. - // - // Deprecated: Use APIServerInternalIPs instead. - APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` - - // apiServerInternalIPs are the IP addresses to contact the Kubernetes API - // server that can be used by components inside the cluster, like kubelets - // using the infrastructure rather than Kubernetes networking. 
These are the - // IPs for a self-hosted load balancer in front of the API servers. In dual - // stack clusters this list contains two IPs otherwise only one. - // - // +kubebuilder:validation:Format=ip - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=set - APIServerInternalIPs []string `json:"apiServerInternalIPs"` - - // ingressIP is an external IP which routes to the default ingress controller. - // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. - // - // Deprecated: Use IngressIPs instead. - IngressIP string `json:"ingressIP,omitempty"` - - // ingressIPs are the external IPs which route to the default ingress - // controller. The IPs are suitable targets of a wildcard DNS record used to - // resolve default route host names. In dual stack clusters this list - // contains two IPs otherwise only one. - // - // +kubebuilder:validation:Format=ip - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=set - IngressIPs []string `json:"ingressIPs"` - - // deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release. - NodeDNSIP string `json:"nodeDNSIP,omitempty"` - - // loadBalancer defines how the load balancer used by the cluster is configured. - // +default={"type": "OpenShiftManagedDefault"} - // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureGate=BareMetalLoadBalancer - // +optional - LoadBalancer *OvirtPlatformLoadBalancer `json:"loadBalancer,omitempty"` -} - -// VSpherePlatformLoadBalancer defines the load balancer used by the cluster on VSphere platform. -// +union -type VSpherePlatformLoadBalancer struct { - // type defines the type of load balancer used by the cluster on VSphere platform - // which can be a user-managed or openshift-managed load balancer - // that is to be used for the OpenShift API and Ingress endpoints. - // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing - // defined in the machine config operator will be deployed. - // When set to UserManaged these static pods will not be deployed and it is expected that - // the load balancer is configured out of band by the deployer. - // When omitted, this means no opinion and the platform is left to choose a reasonable default. - // The default value is OpenShiftManagedDefault. - // +default="OpenShiftManagedDefault" - // +kubebuilder:default:="OpenShiftManagedDefault" - // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged" - // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set" - // +optional - // +unionDiscriminator - Type PlatformLoadBalancerType `json:"type,omitempty"` -} - -// VSpherePlatformFailureDomainSpec holds the region and zone failure domain and -// the vCenter topology of that failure domain. -type VSpherePlatformFailureDomainSpec struct { - // name defines the arbitrary but unique name - // of a failure domain. 
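
As an aside for reviewers unpacking the dense XValidation rule repeated on the apiServerInternalIPs/ingressIPs fields above: the following standalone Go sketch (illustrative only, not part of the vendored package) restates the invariant that rule enforces.

package main

import (
	"fmt"
	"net/netip"
)

// validDualStack mirrors the CEL rule used throughout these types: a
// two-entry list must mix address families, i.e. at most one IPv4 and
// at most one IPv6 address.
func validDualStack(ips []string) bool {
	if len(ips) != 2 {
		return true // the CEL rule only constrains two-entry lists
	}
	a, errA := netip.ParseAddr(ips[0])
	b, errB := netip.ParseAddr(ips[1])
	if errA != nil || errB != nil {
		return true // non-IP values are caught by other validators
	}
	return a.Is4() != b.Is4()
}

func main() {
	fmt.Println(validDualStack([]string{"10.0.0.1", "fd00::1"}))  // true
	fmt.Println(validDualStack([]string{"10.0.0.1", "10.0.0.2"})) // false
}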
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:MaxLength=256
- Name string `json:"name"`
-
- // region defines the name of a region tag that will
- // be attached to a vCenter datacenter. The tag
- // category in vCenter must be named openshift-region.
- // +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:MaxLength=80
- // +kubebuilder:validation:Required
- Region string `json:"region"`
-
- // zone defines the name of a zone tag that will
- // be attached to a vCenter cluster. The tag
- // category in vCenter must be named openshift-zone.
- // +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:MaxLength=80
- // +kubebuilder:validation:Required
- Zone string `json:"zone"`
-
- // server is the fully-qualified domain name or the IP address of the vCenter server.
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:MaxLength=255
- // ---
- // + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname
- Server string `json:"server"`
-
- // Topology describes a given failure domain using vSphere constructs
- // +kubebuilder:validation:Required
- Topology VSpherePlatformTopology `json:"topology"`
-}
-
-// VSpherePlatformTopology holds the required and optional vCenter objects - datacenter,
-// computeCluster, networks, datastore and resourcePool - to provision virtual machines.
-type VSpherePlatformTopology struct {
- // datacenter is the name of the vCenter datacenter in which virtual machines will be located.
- // The maximum length of the datacenter name is 80 characters.
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:MaxLength=80
- Datacenter string `json:"datacenter"`
-
- // computeCluster is the absolute path of the vCenter cluster
- // in which the virtual machine will be located.
- // The absolute path is of the form /<datacenter>/host/<cluster>.
- // The maximum length of the path is 2048 characters.
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:MaxLength=2048
- // +kubebuilder:validation:Pattern=`^/.*?/host/.*?`
- ComputeCluster string `json:"computeCluster"`
-
- // networks is the list of port group network names within this failure domain.
- // Currently, we only support a single interface per RHCOS virtual machine.
- // The available networks (port groups) can be listed using
- // `govc ls 'network/*'`
- // The single interface should be the absolute path of the form
- // /<datacenter>/network/<portgroup>.
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:MaxItems=1
- // +kubebuilder:validation:MinItems=1
- // +listType=atomic
- Networks []string `json:"networks"`
-
- // datastore is the absolute path of the datastore in which the
- // virtual machine is located.
- // The absolute path is of the form /<datacenter>/datastore/<datastore>
- // The maximum length of the path is 2048 characters.
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:MaxLength=2048
- // +kubebuilder:validation:Pattern=`^/.*?/datastore/.*?`
- Datastore string `json:"datastore"`
-
- // resourcePool is the absolute path of the resource pool where virtual machines will be
- // created. The absolute path is of the form /<datacenter>/host/<cluster>/Resources/<resourcepool>.
- // The maximum length of the path is 2048 characters.
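
To make the inventory-path formats above concrete, here is a hypothetical topology literal; it is a sketch that assumes the vendored github.com/openshift/api/config/v1 import path, and every object name in it is made up.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// Hypothetical values; each path follows the /<datacenter>/... vCenter
	// inventory format documented on the fields above.
	topology := configv1.VSpherePlatformTopology{
		Datacenter:     "dc1",
		ComputeCluster: "/dc1/host/cluster-1",
		Networks:       []string{"/dc1/network/port-group-1"},
		Datastore:      "/dc1/datastore/datastore-1",
		ResourcePool:   "/dc1/host/cluster-1/Resources/pool-1",
	}
	fmt.Printf("%+v\n", topology)
}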
- // +kubebuilder:validation:MaxLength=2048
- // +kubebuilder:validation:Pattern=`^/.*?/host/.*?/Resources.*`
- // +optional
- ResourcePool string `json:"resourcePool,omitempty"`
-
- // folder is the absolute path of the folder where
- // virtual machines are located. The absolute path
- // is of the form /<datacenter>/vm/<folder>.
- // The maximum length of the path is 2048 characters.
- // +kubebuilder:validation:MaxLength=2048
- // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?`
- // +optional
- Folder string `json:"folder,omitempty"`
-
- // template is the full inventory path of the virtual machine or template
- // that will be cloned when creating new machines in this failure domain.
- // The maximum length of the path is 2048 characters.
- //
- // When omitted, the template will be calculated by the control plane
- // machineset operator based on the region and zone defined in
- // VSpherePlatformFailureDomainSpec.
- // For example, for zone=zonea, region=region1, and infrastructure name=test,
- // the template path would be calculated as /<datacenter>/vm/test-rhcos-region1-zonea.
- // +openshift:enable:FeatureGate=VSphereControlPlaneMachineSet
- // +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:MaxLength=2048
- // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?`
- // +optional
- Template string `json:"template,omitempty"`
-}
-
-// VSpherePlatformVCenterSpec stores the vCenter connection fields.
-// This is used by the vSphere CCM.
-type VSpherePlatformVCenterSpec struct {
-
- // server is the fully-qualified domain name or the IP address of the vCenter server.
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:MaxLength=255
- // ---
- // + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname
- Server string `json:"server"`
-
- // port is the TCP port that will be used to communicate to
- // the vCenter endpoint.
- // When omitted, this means the user has no opinion and
- // it is up to the platform to choose a sensible default,
- // which is subject to change over time.
- // +kubebuilder:validation:Minimum=1
- // +kubebuilder:validation:Maximum=32767
- // +optional
- Port int32 `json:"port,omitempty"`
-
- // The vCenter Datacenters in which the RHCOS
- // vm guests are located. This field will
- // be used by the Cloud Controller Manager.
- // Each datacenter listed here should be used within
- // a topology.
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:MinItems=1
- // +listType=set
- Datacenters []string `json:"datacenters"`
-}
-
-// VSpherePlatformNodeNetworkingSpec holds the network CIDR(s) and port group name for
-// including and excluding IP ranges in the cloud provider.
-// This would be used for example when multiple network adapters are attached to
-// a guest to help determine which IP address the cloud config manager should use
-// for the external and internal node networking.
-type VSpherePlatformNodeNetworkingSpec struct {
- // networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs
- // that will be used in respective status.addresses fields.
- // ---
- // + Validation is applied via a patch, we validate the format as cidr
- // +listType=set
- // +optional
- NetworkSubnetCIDR []string `json:"networkSubnetCidr,omitempty"`
-
- // network VirtualMachine's VM Network names that will be used when searching
- // for status.addresses fields.
Note that if internal.networkSubnetCIDR and - // external.networkSubnetCIDR are not set, then the vNIC associated to this network must - // only have a single IP address assigned to it. - // The available networks (port groups) can be listed using - // `govc ls 'network/*'` - // +optional - Network string `json:"network,omitempty"` - - // excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting - // the IP address from the VirtualMachine's VM for use in the status.addresses fields. - // --- - // + Validation is applied via a patch, we validate the format as cidr - // +listType=atomic - // +optional - ExcludeNetworkSubnetCIDR []string `json:"excludeNetworkSubnetCidr,omitempty"` -} - -// VSpherePlatformNodeNetworking holds the external and internal node networking spec. -type VSpherePlatformNodeNetworking struct { - // external represents the network configuration of the node that is externally routable. - // +optional - External VSpherePlatformNodeNetworkingSpec `json:"external"` - // internal represents the network configuration of the node that is routable only within the cluster. - // +optional - Internal VSpherePlatformNodeNetworkingSpec `json:"internal"` -} - -// VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. -// In the future the cloud provider operator, storage operator and machine operator will -// use these fields for configuration. -// +kubebuilder:validation:XValidation:rule="!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)",message="apiServerInternalIPs list is required once set" -// +kubebuilder:validation:XValidation:rule="!has(oldSelf.ingressIPs) || has(self.ingressIPs)",message="ingressIPs list is required once set" -type VSpherePlatformSpec struct { - // vcenters holds the connection details for services to communicate with vCenter. - // Currently, only a single vCenter is supported. - // --- - // + If VCenters is not defined use the existing cloud-config configmap defined - // + in openshift-config. - // +kubebuilder:validation:MinItems=0 - // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 - // +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiVCenters,maxItems=3 - // +listType=atomic - // +optional - VCenters []VSpherePlatformVCenterSpec `json:"vcenters,omitempty"` - - // failureDomains contains the definition of region, zone and the vCenter topology. - // If this is omitted failure domains (regions and zones) will not be used. - // +listType=map - // +listMapKey=name - // +optional - FailureDomains []VSpherePlatformFailureDomainSpec `json:"failureDomains,omitempty"` - - // nodeNetworking contains the definition of internal and external network constraints for - // assigning the node's networking. - // If this field is omitted, networking defaults to the legacy - // address selection behavior which is to only support a single address and - // return the first one found. - // +optional - NodeNetworking VSpherePlatformNodeNetworking `json:"nodeNetworking,omitempty"` - - // apiServerInternalIPs are the IP addresses to contact the Kubernetes API - // server that can be used by components inside the cluster, like kubelets - // using the infrastructure rather than Kubernetes networking. These are the - // IPs for a self-hosted load balancer in front of the API servers. - // In dual stack clusters this list contains two IP addresses, one from IPv4 - // family and one from IPv6. - // In single stack clusters a single IP address is expected. 
- // When omitted, values from the status.apiServerInternalIPs will be used. - // Once set, the list cannot be completely removed (but its second entry can). - // - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=atomic - // +optional - APIServerInternalIPs []IP `json:"apiServerInternalIPs"` - - // ingressIPs are the external IPs which route to the default ingress - // controller. The IPs are suitable targets of a wildcard DNS record used to - // resolve default route host names. - // In dual stack clusters this list contains two IP addresses, one from IPv4 - // family and one from IPv6. - // In single stack clusters a single IP address is expected. - // When omitted, values from the status.ingressIPs will be used. - // Once set, the list cannot be completely removed (but its second entry can). - // - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=atomic - // +optional - IngressIPs []IP `json:"ingressIPs"` - - // machineNetworks are IP networks used to connect all the OpenShift cluster - // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, - // for example "10.0.0.0/8" or "fd00::/8". - // +listType=atomic - // +kubebuilder:validation:MaxItems=32 - // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" - // +optional - MachineNetworks []CIDR `json:"machineNetworks"` -} - -// VSpherePlatformStatus holds the current status of the vSphere infrastructure provider. -type VSpherePlatformStatus struct { - // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used - // by components inside the cluster, like kubelets using the infrastructure rather - // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI - // points to. It is the IP for a self-hosted load balancer in front of the API servers. - // - // Deprecated: Use APIServerInternalIPs instead. - APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` - - // apiServerInternalIPs are the IP addresses to contact the Kubernetes API - // server that can be used by components inside the cluster, like kubelets - // using the infrastructure rather than Kubernetes networking. These are the - // IPs for a self-hosted load balancer in front of the API servers. In dual - // stack clusters this list contains two IPs otherwise only one. - // - // +kubebuilder:validation:Format=ip - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=atomic - APIServerInternalIPs []string `json:"apiServerInternalIPs"` - - // ingressIP is an external IP which routes to the default ingress controller. - // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. - // - // Deprecated: Use IngressIPs instead. 
- IngressIP string `json:"ingressIP,omitempty"`
-
- // ingressIPs are the external IPs which route to the default ingress
- // controller. The IPs are suitable targets of a wildcard DNS record used to
- // resolve default route host names. In dual stack clusters this list
- // contains two IPs otherwise only one.
- //
- // +kubebuilder:validation:Format=ip
- // +kubebuilder:validation:MaxItems=2
- // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
- // +listType=atomic
- IngressIPs []string `json:"ingressIPs"`
-
- // nodeDNSIP is the IP address for the internal DNS used by the
- // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
- // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
- // vSphere deployments. In order to minimize necessary changes to the
- // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
- // to the nodes in the cluster.
- NodeDNSIP string `json:"nodeDNSIP,omitempty"`
-
- // loadBalancer defines how the load balancer used by the cluster is configured.
- // +default={"type": "OpenShiftManagedDefault"}
- // +kubebuilder:default={"type": "OpenShiftManagedDefault"}
- // +openshift:enable:FeatureGate=BareMetalLoadBalancer
- // +optional
- LoadBalancer *VSpherePlatformLoadBalancer `json:"loadBalancer,omitempty"`
-
- // machineNetworks are IP networks used to connect all the OpenShift cluster nodes.
- // +listType=atomic
- // +kubebuilder:validation:MaxItems=32
- // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
- // +optional
- MachineNetworks []CIDR `json:"machineNetworks"`
-}
-
-// IBMCloudServiceEndpoint stores the configuration of a custom url to
-// override existing defaults of IBM Cloud Services.
-type IBMCloudServiceEndpoint struct {
- // name is the name of the IBM Cloud service.
- // Possible values are: CIS, COS, COSConfig, DNSServices, GlobalCatalog, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC.
- // For example, the IBM Cloud Private IAM service could be configured with the
- // service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com`
- // Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured
- // with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`
- //
- // +kubebuilder:validation:Required
- Name IBMCloudServiceName `json:"name"`
-
- // url is a fully qualified URI with scheme https that overrides the default generated
- // endpoint for a client.
- // This must be provided and cannot be empty.
- //
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:Type=string
- // +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL"
- URL string `json:"url"`
-}
-
-// IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider.
-// This only includes fields that can be modified in the cluster.
-type IBMCloudPlatformSpec struct{}
-
-// IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.
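
As a hedged illustration of the IBMCloudServiceEndpoint type documented above, the sketch below builds the two overrides quoted in the field documentation itself; the import path is assumed from this vendor tree.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// Overrides for the private IAM and VPC endpoints, mirroring the
	// examples given in the name field's doc comment above.
	endpoints := []configv1.IBMCloudServiceEndpoint{
		{Name: "IAM", URL: "https://private.iam.cloud.ibm.com"},
		{Name: "VPC", URL: "https://us.south.private.iaas.cloud.ibm.com"},
	}
	fmt.Printf("%+v\n", endpoints)
}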
-type IBMCloudPlatformStatus struct {
- // Location is where the cluster has been deployed
- Location string `json:"location,omitempty"`
-
- // ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.
- ResourceGroupName string `json:"resourceGroupName,omitempty"`
-
- // ProviderType indicates the type of cluster that was created
- ProviderType IBMCloudProviderType `json:"providerType,omitempty"`
-
- // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing
- // the DNS zone for the cluster's base domain
- CISInstanceCRN string `json:"cisInstanceCRN,omitempty"`
-
- // DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone
- // for the cluster's base domain
- DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"`
-
- // serviceEndpoints is a list of custom endpoints which will override the default
- // service endpoints of an IBM Cloud service. These endpoints are consumed by
- // components within the cluster to reach the respective IBM Cloud Services.
- // +listType=map
- // +listMapKey=name
- // +optional
- ServiceEndpoints []IBMCloudServiceEndpoint `json:"serviceEndpoints,omitempty"`
-}
-
-// KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider.
-// This only includes fields that can be modified in the cluster.
-type KubevirtPlatformSpec struct{}
-
-// KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider.
-type KubevirtPlatformStatus struct {
- // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
- // by components inside the cluster, like kubelets using the infrastructure rather
- // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- // points to. It is the IP for a self-hosted load balancer in front of the API servers.
- APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
-
- // ingressIP is an external IP which routes to the default ingress controller.
- // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
- IngressIP string `json:"ingressIP,omitempty"`
-}
-
-// EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider.
-// This only includes fields that can be modified in the cluster.
-type EquinixMetalPlatformSpec struct{}
-
-// EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider.
-type EquinixMetalPlatformStatus struct {
- // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
- // by components inside the cluster, like kubelets using the infrastructure rather
- // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- // points to. It is the IP for a self-hosted load balancer in front of the API servers.
- APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
-
- // ingressIP is an external IP which routes to the default ingress controller.
- // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
- IngressIP string `json:"ingressIP,omitempty"`
-}
-
-// PowerVSServiceEndpoint stores the configuration of a custom url to
-// override existing defaults of PowerVS Services.
-type PowerVSServiceEndpoint struct {
- // name is the name of the Power VS service.
- // A few of the services are:
- // IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api
- // ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller
- // Power Cloud - https://cloud.ibm.com/apidocs/power-cloud
- //
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$`
- Name string `json:"name"`
-
- // url is a fully qualified URI with scheme https that overrides the default generated
- // endpoint for a client.
- // This must be provided and cannot be empty.
- //
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:Type=string
- // +kubebuilder:validation:Format=uri
- // +kubebuilder:validation:Pattern=`^https://`
- URL string `json:"url"`
-}
-
-// PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider.
-// This only includes fields that can be modified in the cluster.
-type PowerVSPlatformSpec struct {
- // serviceEndpoints is a list of custom endpoints which will override the default
- // service endpoints of a Power VS service.
- // +listType=map
- // +listMapKey=name
- // +optional
- ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"`
-}
-
-// PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider.
-// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceGroup) || has(self.resourceGroup)",message="cannot unset resourceGroup once set"
-type PowerVSPlatformStatus struct {
- // region holds the default Power VS region for new Power VS resources created by the cluster.
- Region string `json:"region"`
-
- // zone holds the default zone for the new Power VS resources created by the cluster.
- // Note: Currently only single-zone OCP clusters are supported
- Zone string `json:"zone"`
-
- // resourceGroup is the resource group name for new IBMCloud resources created for a cluster.
- // The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry.
- // More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs.
- // When omitted, the image registry operator won't be able to configure storage,
- // which results in the image registry cluster operator not being in an available state.
- //
- // +kubebuilder:validation:Pattern=^[a-zA-Z0-9-_ ]+$
- // +kubebuilder:validation:MaxLength=40
- // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="resourceGroup is immutable once set"
- // +optional
- ResourceGroup string `json:"resourceGroup"`
-
- // serviceEndpoints is a list of custom endpoints which will override the default
- // service endpoints of a Power VS service.
- // +listType=map
- // +listMapKey=name
- // +optional
- ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"`
-
- // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing
- // the DNS zone for the cluster's base domain
- CISInstanceCRN string `json:"cisInstanceCRN,omitempty"`
-
- // DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone
- // for the cluster's base domain
- DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"`
-}
-
-// AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider.
-// This only includes fields that can be modified in the cluster.
-type AlibabaCloudPlatformSpec struct{}
-
-// AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider.
-type AlibabaCloudPlatformStatus struct {
- // region specifies the region for Alibaba Cloud resources created for the cluster.
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:Pattern=`^[0-9A-Za-z-]+$`
- // +required
- Region string `json:"region"`
- // resourceGroupID is the ID of the resource group for the cluster.
- // +kubebuilder:validation:Pattern=`^(rg-[0-9A-Za-z]+)?$`
- // +optional
- ResourceGroupID string `json:"resourceGroupID,omitempty"`
- // resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster.
- // +kubebuilder:validation:MaxItems=20
- // +listType=map
- // +listMapKey=key
- // +optional
- ResourceTags []AlibabaCloudResourceTag `json:"resourceTags,omitempty"`
-}
-
-// AlibabaCloudResourceTag is the set of tags to apply to resources.
-type AlibabaCloudResourceTag struct {
- // key is the key of the tag.
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:MaxLength=128
- // +required
- Key string `json:"key"`
- // value is the value of the tag.
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:MaxLength=128
- // +required
- Value string `json:"value"`
-}
-
-// NutanixPlatformLoadBalancer defines the load balancer used by the cluster on Nutanix platform.
-// +union
-type NutanixPlatformLoadBalancer struct {
- // type defines the type of load balancer used by the cluster on Nutanix platform
- // which can be a user-managed or openshift-managed load balancer
- // that is to be used for the OpenShift API and Ingress endpoints.
- // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing
- // defined in the machine config operator will be deployed.
- // When set to UserManaged these static pods will not be deployed and it is expected that
- // the load balancer is configured out of band by the deployer.
- // When omitted, this means no opinion and the platform is left to choose a reasonable default.
- // The default value is OpenShiftManagedDefault.
- // +default="OpenShiftManagedDefault"
- // +kubebuilder:default:="OpenShiftManagedDefault"
- // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged"
- // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set"
- // +optional
- // +unionDiscriminator
- Type PlatformLoadBalancerType `json:"type,omitempty"`
-}
-
-// NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider.
-// This only includes fields that can be modified in the cluster.
-type NutanixPlatformSpec struct {
- // prismCentral holds the endpoint address and port to access the Nutanix Prism Central.
- // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy.
- // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the
- // proxy spec.noProxy list.
- // +kubebuilder:validation:Required
- PrismCentral NutanixPrismEndpoint `json:"prismCentral"`
-
- // prismElements holds one or more endpoint address and port data to access the Nutanix
- // Prism Elements (clusters) of the Nutanix Prism Central.
Currently we only support one - // Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) - // used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) - // spread over multiple Prism Elements (clusters) of the Prism Central. - // +kubebuilder:validation:Required - // +listType=map - // +listMapKey=name - PrismElements []NutanixPrismElementEndpoint `json:"prismElements"` - - // failureDomains configures failure domains information for the Nutanix platform. - // When set, the failure domains defined here may be used to spread Machines across - // prism element clusters to improve fault tolerance of the cluster. - // +listType=map - // +listMapKey=name - // +optional - FailureDomains []NutanixFailureDomain `json:"failureDomains"` -} - -// NutanixFailureDomain configures failure domain information for the Nutanix platform. -type NutanixFailureDomain struct { - // name defines the unique name of a failure domain. - // Name is required and must be at most 64 characters in length. - // It must consist of only lower case alphanumeric characters and hyphens (-). - // It must start and end with an alphanumeric character. - // This value is arbitrary and is used to identify the failure domain within the platform. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=64 - // +kubebuilder:validation:Pattern=`[a-z0-9]([-a-z0-9]*[a-z0-9])?` - Name string `json:"name"` - - // cluster is to identify the cluster (the Prism Element under management of the Prism Central), - // in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained - // from the Prism Central console or using the prism_central API. - // +kubebuilder:validation:Required - Cluster NutanixResourceIdentifier `json:"cluster"` - - // subnets holds a list of identifiers (one or more) of the cluster's network subnets - // for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be - // obtained from the Prism Central console or using the prism_central API. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:MaxItems=1 - // +listType=map - // +listMapKey=type - Subnets []NutanixResourceIdentifier `json:"subnets"` -} - -// NutanixIdentifierType is an enumeration of different resource identifier types. -// +kubebuilder:validation:Enum:=UUID;Name -type NutanixIdentifierType string - -const ( - // NutanixIdentifierUUID is a resource identifier identifying the object by UUID. - NutanixIdentifierUUID NutanixIdentifierType = "UUID" - - // NutanixIdentifierName is a resource identifier identifying the object by Name. - NutanixIdentifierName NutanixIdentifierType = "Name" -) - -// NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.) -// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'UUID' ? has(self.uuid) : !has(self.uuid)",message="uuid configuration is required when type is UUID, and forbidden otherwise" -// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Name' ? has(self.name) : !has(self.name)",message="name configuration is required when type is Name, and forbidden otherwise" -// +union -type NutanixResourceIdentifier struct { - // type is the identifier type to use for this resource. 
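
The two XValidation rules above make this a discriminated union: exactly the field matching `type` may be set. The following sketch (assumed import path; hypothetical identifier values) shows the two valid shapes.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// uuidRef builds the UUID arm of the union; per the CEL rules above,
// uuid must be set (and name unset) when type is UUID.
func uuidRef(uuid string) configv1.NutanixResourceIdentifier {
	return configv1.NutanixResourceIdentifier{
		Type: configv1.NutanixIdentifierUUID,
		UUID: &uuid,
	}
}

// nameRef builds the Name arm; name must be set and uuid unset.
func nameRef(name string) configv1.NutanixResourceIdentifier {
	return configv1.NutanixResourceIdentifier{
		Type: configv1.NutanixIdentifierName,
		Name: &name,
	}
}

func main() {
	ref := uuidRef("0005b0f1-8f43-a0f2-02b7-3cecef193712")
	fmt.Println(ref.Type, *ref.UUID)
	byName := nameRef("pe-cluster-1")
	fmt.Println(byName.Type, *byName.Name)
}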
- // +unionDiscriminator - // +kubebuilder:validation:Required - Type NutanixIdentifierType `json:"type"` - - // uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. - // +optional - UUID *string `json:"uuid,omitempty"` - - // name is the resource name in the PC. It cannot be empty if the type is Name. - // +optional - Name *string `json:"name,omitempty"` -} - -// NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster) -type NutanixPrismEndpoint struct { - // address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) - // +kubebuilder:validation:Required - // +kubebuilder:validation:MaxLength=256 - Address string `json:"address"` - - // port is the port number to access the Nutanix Prism Central or Element (cluster) - // +kubebuilder:validation:Required - // +kubebuilder:validation:Minimum=1 - // +kubebuilder:validation:Maximum=65535 - Port int32 `json:"port"` -} - -// NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster) -type NutanixPrismElementEndpoint struct { - // name is the name of the Prism Element (cluster). This value will correspond with - // the cluster field configured on other resources (eg Machines, PVCs, etc). - // +kubebuilder:validation:Required - // +kubebuilder:validation:MaxLength=256 - Name string `json:"name"` - - // endpoint holds the endpoint address and port data of the Prism Element (cluster). - // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. - // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the - // proxy spec.noProxy list. - // +kubebuilder:validation:Required - Endpoint NutanixPrismEndpoint `json:"endpoint"` -} - -// NutanixPlatformStatus holds the current status of the Nutanix infrastructure provider. -type NutanixPlatformStatus struct { - // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used - // by components inside the cluster, like kubelets using the infrastructure rather - // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI - // points to. It is the IP for a self-hosted load balancer in front of the API servers. - // - // Deprecated: Use APIServerInternalIPs instead. - APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` - - // apiServerInternalIPs are the IP addresses to contact the Kubernetes API - // server that can be used by components inside the cluster, like kubelets - // using the infrastructure rather than Kubernetes networking. These are the - // IPs for a self-hosted load balancer in front of the API servers. In dual - // stack clusters this list contains two IPs otherwise only one. - // - // +kubebuilder:validation:Format=ip - // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=set - APIServerInternalIPs []string `json:"apiServerInternalIPs"` - - // ingressIP is an external IP which routes to the default ingress controller. - // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. - // - // Deprecated: Use IngressIPs instead. 
- IngressIP string `json:"ingressIP,omitempty"`
-
- // ingressIPs are the external IPs which route to the default ingress
- // controller. The IPs are suitable targets of a wildcard DNS record used to
- // resolve default route host names. In dual stack clusters this list
- // contains two IPs otherwise only one.
- //
- // +kubebuilder:validation:Format=ip
- // +kubebuilder:validation:MaxItems=2
- // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
- // +listType=set
- IngressIPs []string `json:"ingressIPs"`
-
- // loadBalancer defines how the load balancer used by the cluster is configured.
- // +default={"type": "OpenShiftManagedDefault"}
- // +kubebuilder:default={"type": "OpenShiftManagedDefault"}
- // +openshift:enable:FeatureGate=BareMetalLoadBalancer
- // +optional
- LoadBalancer *NutanixPlatformLoadBalancer `json:"loadBalancer,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// InfrastructureList is a list of Infrastructure resources.
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-type InfrastructureList struct {
- metav1.TypeMeta `json:",inline"`
-
- // metadata is the standard list's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- metav1.ListMeta `json:"metadata"`
-
- Items []Infrastructure `json:"items"`
-}
-
-// IP is an IP address (for example, "10.0.0.0" or "fd00::").
-// +kubebuilder:validation:XValidation:rule="isIP(self)",message="value must be a valid IP address"
-// +kubebuilder:validation:MaxLength:=39
-// +kubebuilder:validation:MinLength:=1
-type IP string
-
-// CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8").
-// +kubebuilder:validation:XValidation:rule="isCIDR(self)",message="value must be a valid CIDR network address"
-// +kubebuilder:validation:MaxLength:=43
-// +kubebuilder:validation:MinLength:=1
-type CIDR string
diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go
deleted file mode 100644
index 302913a1..00000000
--- a/vendor/github.com/openshift/api/config/v1/types_ingress.go
+++ /dev/null
@@ -1,340 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Ingress holds cluster-wide information about ingress, including the default ingress domain
-// used for routes. The canonical name is `cluster`.
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
-// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=ingresses,scope=Cluster
-// +kubebuilder:subresource:status
-// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
-type Ingress struct {
- metav1.TypeMeta `json:",inline"`
-
- // metadata is the standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec IngressSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status IngressStatus `json:"status"`
-}
-
-type IngressSpec struct {
- // domain is used to generate a default host name for a route when the
- // route's host name is empty. The generated host name will follow this
- // pattern: "<route-name>.<route-namespace>.<domain>".
- //
- // It is also used as the default wildcard domain suffix for ingress. The
- // default ingresscontroller domain will follow this pattern: "*.<domain>".
- //
- // Once set, changing domain is not currently supported.
- Domain string `json:"domain"`
-
- // appsDomain is an optional domain to use instead of the one specified
- // in the domain field when a Route is created without specifying an explicit
- // host. If appsDomain is nonempty, this value is used to generate default
- // host values for Route. Unlike domain, appsDomain may be modified after
- // installation.
- // This assumes a new ingresscontroller has been set up with a wildcard
- // certificate.
- // +optional
- AppsDomain string `json:"appsDomain,omitempty"`
-
- // componentRoutes is an optional list of routes that are managed by OpenShift components
- // that a cluster-admin is able to configure the hostname and serving certificate for.
- // The namespace and name of each route in this list should match an existing entry in the
- // status.componentRoutes list.
- //
- // To determine the set of configurable Routes, look at namespace and name of entries in the
- // .status.componentRoutes list, where participating operators write the status of
- // configurable routes.
- // +optional
- // +listType=map
- // +listMapKey=namespace
- // +listMapKey=name
- ComponentRoutes []ComponentRouteSpec `json:"componentRoutes,omitempty"`
-
- // requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes
- // matching the domainPattern/s and namespaceSelector/s that are specified in the policy.
- // Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route
- // annotation, and affect route admission.
- //
- // A candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation:
- // "haproxy.router.openshift.io/hsts_header"
- // E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains
- //
- // - For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector,
- // then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route
- // is rejected.
- // - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies
- // determines the route's admission status.
- // - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector,
- // then it may use any HSTS Policy annotation.
- //
- // The HSTS policy configuration may be changed after routes have already been created. An update to a previously
- // admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration.
- // However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working.
- //
- // Note that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid.
- // +optional
- RequiredHSTSPolicies []RequiredHSTSPolicy `json:"requiredHSTSPolicies,omitempty"`
-
- // loadBalancer contains the load balancer details in general which are not only specific to the underlying infrastructure
- // provider of the current cluster and are required for Ingress Controller to work on OpenShift.
- // +optional
- LoadBalancer LoadBalancer `json:"loadBalancer,omitempty"`
-}
-
-// IngressPlatformSpec holds the desired state of Ingress specific to the underlying infrastructure provider
-// of the current cluster. Since these are used at spec-level for the underlying cluster, it
-// is expected that only one of the spec structs is set.
-// +union
-type IngressPlatformSpec struct {
- // type is the underlying infrastructure provider for the cluster.
- // Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
- // "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS",
- // "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms,
- // and must handle unrecognized platforms as None if they do not support that platform.
- //
- // +unionDiscriminator
- Type PlatformType `json:"type"`
-
- // aws contains settings specific to the Amazon Web Services infrastructure provider.
- // +optional
- AWS *AWSIngressSpec `json:"aws,omitempty"`
-}
-
-type LoadBalancer struct {
- // platform holds configuration specific to the underlying
- // infrastructure provider for the ingress load balancers.
- // When omitted, this means the user has no opinion and the platform is left
- // to choose reasonable defaults. These defaults are subject to change over time.
- // +optional
- Platform IngressPlatformSpec `json:"platform,omitempty"`
-}
-
-// AWSIngressSpec holds the desired state of the Ingress for Amazon Web Services infrastructure provider.
-// This only includes fields that can be modified in the cluster.
-// +union
-type AWSIngressSpec struct {
- // type allows user to set a load balancer type.
- // When this field is set the default ingresscontroller will get created using the specified LBType.
- // If this field is not set then the default ingress controller of LBType Classic will be created.
- // Valid values are:
- //
- // * "Classic": A Classic Load Balancer that makes routing decisions at either
- // the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See
- // the following for additional details:
- //
- // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb
- //
- // * "NLB": A Network Load Balancer that makes routing decisions at the
- // transport layer (TCP/SSL). See the following for additional details:
- //
- // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb
- // +unionDiscriminator
- // +kubebuilder:validation:Enum:=NLB;Classic
- // +kubebuilder:validation:Required
- Type AWSLBType `json:"type,omitempty"`
-}
-
-type AWSLBType string
-
-const (
- // NLB is the Network Load Balancer Type of AWS. Using NLB one can set NLB load balancer type for the default ingress controller.
- NLB AWSLBType = "NLB"
-
- // Classic is the Classic Load Balancer Type of AWS. Using Classic one can set Classic load balancer type for the default ingress controller.
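
For orientation, here is a sketch of an IngressSpec that selects the NLB variant through the union above; it assumes the package's AWSPlatformType constant (defined alongside the infrastructure types) and an illustrative domain.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// Hypothetical values: request an AWS Network Load Balancer for the
	// default ingress controller via the union discriminator.
	spec := configv1.IngressSpec{
		Domain: "apps.example.com",
		LoadBalancer: configv1.LoadBalancer{
			Platform: configv1.IngressPlatformSpec{
				Type: configv1.AWSPlatformType,
				AWS:  &configv1.AWSIngressSpec{Type: configv1.NLB},
			},
		},
	}
	fmt.Printf("%+v\n", spec)
}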
- Classic AWSLBType = "Classic" -) - -// ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported. -// +kubebuilder:validation:Pattern="^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$" -// +kubebuilder:validation:MinLength=1 -// +kubebuilder:validation:MaxLength=512 -type ConsumingUser string - -// Hostname is a host name as defined by RFC-1123. -// + --- -// + The left operand of the | is the original kubebuilder hostname validation format, which is incorrect because it -// + allows upper case letters, disallows hyphen or number in the TLD, and allows labels to start/end in non-alphanumeric -// + characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256. -// + ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$ -// + -// + The right operand of the | is a new pattern that mimics the current API route admission validation on hostname, -// + except that it allows hostnames longer than the maximum length: -// + ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ -// + -// + Both operand patterns are made available so that modifications on ingress spec can still happen after an invalid hostname -// + was saved via validation by the incorrect left operand of the | operator. -// + -// +kubebuilder:validation:Pattern=`^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$` -type Hostname string - -type IngressStatus struct { - // componentRoutes is where participating operators place the current route status for routes whose - // hostnames and serving certificates can be customized by the cluster-admin. - // +optional - // +listType=map - // +listMapKey=namespace - // +listMapKey=name - ComponentRoutes []ComponentRouteStatus `json:"componentRoutes,omitempty"` - - // defaultPlacement is set at installation time to control which - // nodes will host the ingress router pods by default. The options are - // control-plane nodes or worker nodes. - // - // This field works by dictating how the Cluster Ingress Operator will - // consider unset replicas and nodePlacement fields in IngressController - // resources when creating the corresponding Deployments. - // - // See the documentation for the IngressController replicas and nodePlacement - // fields for more information. - // - // When omitted, the default value is Workers - // - // +kubebuilder:validation:Enum:="ControlPlane";"Workers";"" - // +optional - DefaultPlacement DefaultPlacement `json:"defaultPlacement"` -} - -// ComponentRouteSpec allows for configuration of a route's hostname and serving certificate. -type ComponentRouteSpec struct { - // namespace is the namespace of the route to customize. - // - // The namespace and name of this componentRoute must match a corresponding - // entry in the list of status.componentRoutes if the route is to be customized. 
- // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required - // +required - Namespace string `json:"namespace"` - - // name is the logical name of the route to customize. - // - // The namespace and name of this componentRoute must match a corresponding - // entry in the list of status.componentRoutes if the route is to be customized. - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required - // +required - Name string `json:"name"` - - // hostname is the hostname that should be used by the route. - // +kubebuilder:validation:Required - // +required - Hostname Hostname `json:"hostname"` - - // servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. - // The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. - // If the custom hostname uses the default routing suffix of the cluster, - // the Secret specification for a serving certificate will not be needed. - // +optional - ServingCertKeyPairSecret SecretNameReference `json:"servingCertKeyPairSecret"` -} - -// ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate. -type ComponentRouteStatus struct { - // namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace - // ensures that no two components will conflict and the same component can be installed multiple times. - // - // The namespace and name of this componentRoute must match a corresponding - // entry in the list of spec.componentRoutes if the route is to be customized. - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required - // +required - Namespace string `json:"namespace"` - - // name is the logical name of the route to customize. It does not have to be the actual name of a route resource - // but it cannot be renamed. - // - // The namespace and name of this componentRoute must match a corresponding - // entry in the list of spec.componentRoutes if the route is to be customized. - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required - // +required - Name string `json:"name"` - - // defaultHostname is the hostname of this route prior to customization. - // +kubebuilder:validation:Required - // +required - DefaultHostname Hostname `json:"defaultHostname"` - - // consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret. - // +kubebuilder:validation:MaxItems=5 - // +optional - ConsumingUsers []ConsumingUser `json:"consumingUsers,omitempty"` - - // currentHostnames is the list of current names used by the route. Typically, this list should consist of a single - // hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list. - // +kubebuilder:validation:MinItems=1 - // +optional - CurrentHostnames []Hostname `json:"currentHostnames,omitempty"` - - // conditions are used to communicate the state of the componentRoutes entry. - // - // Supported conditions include Available, Degraded and Progressing. 
- // - // If available is true, the content served by the route can be accessed by users. This includes cases - // where a default may continue to serve content while the customized route specified by the cluster-admin - // is being configured. - // - // If Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. - // The currentHostnames field may or may not be in effect. - // - // If Progressing is true, that means the component is taking some action related to the componentRoutes entry. - // +optional - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty"` - - // relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. - // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:Required - // +required - RelatedObjects []ObjectReference `json:"relatedObjects"` -} - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +openshift:compatibility-gen:level=1 -type IngressList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []Ingress `json:"items"` -} - -// DefaultPlacement defines the default placement of ingress router pods. -type DefaultPlacement string - -const ( - // "Workers" is for having router pods placed on worker nodes by default. - DefaultPlacementWorkers DefaultPlacement = "Workers" - - // "ControlPlane" is for having router pods placed on control-plane nodes by default. - DefaultPlacementControlPlane DefaultPlacement = "ControlPlane" -) diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go deleted file mode 100644 index 211d5c08..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ /dev/null @@ -1,309 +0,0 @@ -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. -// Please view network.spec for an explanation on what applies when configuring this resource. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:compatibility-gen:level=1 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=networks,scope=Cluster -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type Network struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration. - // As a general rule, this SHOULD NOT be read directly. Instead, you should - // consume the NetworkStatus, as it indicates the currently deployed configuration. - // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. - // +kubebuilder:validation:Required - // +required - Spec NetworkSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status NetworkStatus `json:"status"` -} - -// NetworkSpec is the desired network configuration. -// As a general rule, this SHOULD NOT be read directly. Instead, you should -// consume the NetworkStatus, as it indicates the currently deployed configuration. -// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. -// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkDiagnosticsConfig,rule="!has(self.networkDiagnostics) || !has(self.networkDiagnostics.mode) || self.networkDiagnostics.mode!='Disabled' || !has(self.networkDiagnostics.sourcePlacement) && !has(self.networkDiagnostics.targetPlacement)",message="cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement when networkDiagnostics.mode is Disabled" -type NetworkSpec struct { - // IP address pool to use for pod IPs. - // This field is immutable after installation. - // +listType=atomic - ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"` - - // IP address pool for services. - // Currently, we only support a single entry here. - // This field is immutable after installation. - // +listType=atomic - ServiceNetwork []string `json:"serviceNetwork"` - - // NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). - // This should match a value that the cluster-network-operator understands, - // or else no networking will be installed. - // Currently supported values are: - // - OpenShiftSDN - // This field is immutable after installation. - NetworkType string `json:"networkType"` - - // externalIP defines configuration for controllers that - // affect Service.ExternalIP. If nil, then ExternalIP is - // not allowed to be set. - // +optional - ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"` - - // The port range allowed for Services of type NodePort. - // If not specified, the default of 30000-32767 will be used. - // Such Services without a NodePort specified will have one - // automatically allocated from this range. - // This parameter can be updated after the cluster is - // installed. - // +kubebuilder:validation:Pattern=`^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$` - ServiceNodePortRange string `json:"serviceNodePortRange,omitempty"` - - // networkDiagnostics defines network diagnostics configuration. - // - // Takes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. - // If networkDiagnostics is not specified or is empty, - // and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, - // the network diagnostics feature will be disabled. 
- //
- // +optional
- // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig
- NetworkDiagnostics NetworkDiagnostics `json:"networkDiagnostics"`
-}
-
-// NetworkStatus is the current network configuration.
-type NetworkStatus struct {
- // IP address pool to use for pod IPs.
- // +listType=atomic
- ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"`
-
- // IP address pool for services.
- // Currently, we only support a single entry here.
- // +listType=atomic
- ServiceNetwork []string `json:"serviceNetwork,omitempty"`
-
- // NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).
- NetworkType string `json:"networkType,omitempty"`
-
- // ClusterNetworkMTU is the MTU for inter-pod networking.
- ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"`
-
- // Migration contains the cluster network migration configuration.
- Migration *NetworkMigration `json:"migration,omitempty"`
-
- // conditions represents the observations of a network.config current state.
- // Known .status.conditions.type are: "NetworkTypeMigrationInProgress", "NetworkTypeMigrationMTUReady",
- // "NetworkTypeMigrationTargetCNIAvailable", "NetworkTypeMigrationTargetCNIInUse",
- // "NetworkTypeMigrationOriginalCNIPurged" and "NetworkDiagnosticsAvailable"
- // +optional
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +listType=map
- // +listMapKey=type
- // +openshift:enable:FeatureGate=NetworkLiveMigration
- // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig
- Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
-}
-
-// ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs
-// are allocated.
-type ClusterNetworkEntry struct {
- // The complete block for pod IPs.
- CIDR string `json:"cidr"`
-
- // The size (prefix) of block to allocate to each node. If this
- // field is not used by the plugin, it can be left unset.
- // +kubebuilder:validation:Minimum=0
- // +optional
- HostPrefix uint32 `json:"hostPrefix,omitempty"`
-}
-
-// ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field
-// of a Service resource.
-type ExternalIPConfig struct {
- // policy is a set of restrictions applied to the ExternalIP field.
- // If nil or empty, then ExternalIP is not allowed to be set.
- // +optional
- Policy *ExternalIPPolicy `json:"policy,omitempty"`
-
- // autoAssignCIDRs is a list of CIDRs from which to automatically assign
- // Service.ExternalIP. These are assigned when the service is of type
- // LoadBalancer. In general, this is only useful for bare-metal clusters.
- // In OpenShift 3.x, this was misleadingly called "IngressIPs".
- // Automatically assigned External IPs are not affected by any
- // ExternalIPPolicy rules.
- // Currently, only one entry may be provided.
- // +optional
- // +listType=atomic
- AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"`
-}
-
-// ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP
-// field in a Service. If the zero struct is supplied, then none are permitted.
-// The policy controller always allows automatically assigned external IPs.
-type ExternalIPPolicy struct {
- // allowedCIDRs is the list of allowed CIDRs.
- // +listType=atomic
- AllowedCIDRs []string `json:"allowedCIDRs,omitempty"`
-
- // rejectedCIDRs is the list of disallowed CIDRs. These take precedence
- // over allowedCIDRs.
- // +optional
- // +listType=atomic
- RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-type NetworkList struct {
- metav1.TypeMeta `json:",inline"`
-
- // metadata is the standard list's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- metav1.ListMeta `json:"metadata"`
-
- Items []Network `json:"items"`
-}
-
-// NetworkMigration represents the cluster network configuration.
-type NetworkMigration struct {
- // NetworkType is the target plugin that is to be deployed.
- // Currently supported values are: OpenShiftSDN, OVNKubernetes
- // +kubebuilder:validation:Enum={"OpenShiftSDN","OVNKubernetes"}
- // +optional
- NetworkType string `json:"networkType,omitempty"`
-
- // MTU contains the MTU migration configuration.
- // +optional
- MTU *MTUMigration `json:"mtu,omitempty"`
-}
-
-// MTUMigration contains information about MTU migration.
-type MTUMigration struct {
- // Network contains MTU migration configuration for the default network.
- // +optional
- Network *MTUMigrationValues `json:"network,omitempty"`
-
- // Machine contains MTU migration configuration for the machine's uplink.
- // +optional
- Machine *MTUMigrationValues `json:"machine,omitempty"`
-}
-
-// MTUMigrationValues contains the values for an MTU migration.
-type MTUMigrationValues struct {
- // To is the MTU to migrate to.
- // +kubebuilder:validation:Minimum=0
- To *uint32 `json:"to"`
-
- // From is the MTU to migrate from.
- // +kubebuilder:validation:Minimum=0
- // +optional
- From *uint32 `json:"from,omitempty"`
-}
-
-// NetworkDiagnosticsMode is an enumeration of the available network diagnostics modes
-// Valid values are "", "All", "Disabled".
-// +kubebuilder:validation:Enum:="";All;Disabled
-type NetworkDiagnosticsMode string
-
-const (
- // NetworkDiagnosticsNoOpinion means that the user has no opinion and the platform is left
- // to choose a reasonable default. The current default is All and is subject to change over time.
- NetworkDiagnosticsNoOpinion NetworkDiagnosticsMode = ""
- // NetworkDiagnosticsAll means that all network diagnostics checks are enabled
- NetworkDiagnosticsAll NetworkDiagnosticsMode = "All"
- // NetworkDiagnosticsDisabled means that network diagnostics is disabled
- NetworkDiagnosticsDisabled NetworkDiagnosticsMode = "Disabled"
-)
-
-// NetworkDiagnostics defines network diagnostics configuration
-type NetworkDiagnostics struct {
- // mode controls the network diagnostics mode
- //
- // When omitted, this means the user has no opinion and the platform is left
- // to choose reasonable defaults. These defaults are subject to change over time.
- // The current default is All.
- //
- // +optional
- Mode NetworkDiagnosticsMode `json:"mode"`
-
- // sourcePlacement controls the scheduling of network diagnostics source deployment
- //
- // See NetworkDiagnosticsSourcePlacement for more details about default values.
- //
- // +optional
- SourcePlacement NetworkDiagnosticsSourcePlacement `json:"sourcePlacement"`
-
- // targetPlacement controls the scheduling of network diagnostics target daemonset
- //
- // See NetworkDiagnosticsTargetPlacement for more details about default values.
- //
- // +optional
- TargetPlacement NetworkDiagnosticsTargetPlacement `json:"targetPlacement"`
-}
-
-// NetworkDiagnosticsSourcePlacement defines node scheduling configuration for network diagnostics source components
-type NetworkDiagnosticsSourcePlacement struct {
- // nodeSelector is the node selector applied to network diagnostics components
- //
- // When omitted, this means the user has no opinion and the platform is left
- // to choose reasonable defaults. These defaults are subject to change over time.
- // The current default is `kubernetes.io/os: linux`.
- //
- // +optional
- NodeSelector map[string]string `json:"nodeSelector"`
-
- // tolerations is a list of tolerations applied to network diagnostics components
- //
- // When omitted, this means the user has no opinion and the platform is left
- // to choose reasonable defaults. These defaults are subject to change over time.
- // The current default is an empty list.
- //
- // +optional
- // +listType=atomic
- Tolerations []corev1.Toleration `json:"tolerations"`
-}
-
-// NetworkDiagnosticsTargetPlacement defines node scheduling configuration for network diagnostics target components
-type NetworkDiagnosticsTargetPlacement struct {
- // nodeSelector is the node selector applied to network diagnostics components
- //
- // When omitted, this means the user has no opinion and the platform is left
- // to choose reasonable defaults. These defaults are subject to change over time.
- // The current default is `kubernetes.io/os: linux`.
- //
- // +optional
- NodeSelector map[string]string `json:"nodeSelector"`
-
- // tolerations is a list of tolerations applied to network diagnostics components
- //
- // When omitted, this means the user has no opinion and the platform is left
- // to choose reasonable defaults. These defaults are subject to change over time.
- // The current default is `- operator: "Exists"` which means that all taints are tolerated.
- //
- // +optional
- // +listType=atomic
- Tolerations []corev1.Toleration `json:"tolerations"`
-}
diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go
deleted file mode 100644
index 8bf099bd..00000000
--- a/vendor/github.com/openshift/api/config/v1/types_node.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package v1
-
-import (
- "time"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Node holds cluster-wide information about node specific features.
-//
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1107
-// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=nodes,scope=Cluster
-// +kubebuilder:subresource:status
-// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
-type Node struct {
- metav1.TypeMeta `json:",inline"`
-
- // metadata is the standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec NodeSpec `json:"spec"`
-
- // status holds observed values.
- // +optional
- Status NodeStatus `json:"status"`
-}
-
-type NodeSpec struct {
- // CgroupMode determines the cgroups version on the node
- // +optional
- CgroupMode CgroupMode `json:"cgroupMode,omitempty"`
-
- // WorkerLatencyProfile determines how fast the kubelet is updating
- // the status and the corresponding reaction of the cluster
- // +optional
- WorkerLatencyProfile WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"`
-}
-
-type NodeStatus struct{}
-
-// +kubebuilder:validation:Enum=v1;v2;""
-type CgroupMode string
-
-const (
- CgroupModeEmpty CgroupMode = "" // Empty string indicates to honor user set value on the system that should not be overridden by OpenShift
- CgroupModeV1 CgroupMode = "v1"
- CgroupModeV2 CgroupMode = "v2"
- CgroupModeDefault CgroupMode = CgroupModeV1
-)
-
-// +kubebuilder:validation:Enum=Default;MediumUpdateAverageReaction;LowUpdateSlowReaction
-type WorkerLatencyProfileType string
-
-const (
- // Medium Kubelet Update Frequency (heart-beat) and Average Reaction Time to unresponsive Node
- MediumUpdateAverageReaction WorkerLatencyProfileType = "MediumUpdateAverageReaction"
-
- // Low Kubelet Update Frequency (heart-beat) and Slow Reaction Time to unresponsive Node
- LowUpdateSlowReaction WorkerLatencyProfileType = "LowUpdateSlowReaction"
-
- // Default values of relevant Kubelet, Kube Controller Manager and Kube API Server
- DefaultUpdateDefaultReaction WorkerLatencyProfileType = "Default"
-)
-
-const (
- // DefaultNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
- DefaultNodeStatusUpdateFrequency = 10 * time.Second
- // DefaultNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
- DefaultNodeMonitorGracePeriod = 40 * time.Second
- // DefaultNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
- DefaultNotReadyTolerationSeconds = 300
- // DefaultUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
- DefaultUnreachableTolerationSeconds = 300
-
- // MediumNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of MediumUpdateAverageReaction WorkerLatencyProfile type
- MediumNodeStatusUpdateFrequency = 20 * time.Second
- // MediumNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of MediumUpdateAverageReaction WorkerLatencyProfile type
- MediumNodeMonitorGracePeriod = 2 * time.Minute
- // MediumNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of MediumUpdateAverageReaction WorkerLatencyProfile type
- MediumNotReadyTolerationSeconds = 60
- // MediumUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of MediumUpdateAverageReaction WorkerLatencyProfile type
- MediumUnreachableTolerationSeconds = 60 - - // LowNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of LowUpdateSlowReaction WorkerLatencyProfile type - LowNodeStatusUpdateFrequency = 1 * time.Minute - // LowNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of LowUpdateSlowReaction WorkerLatencyProfile type - LowNodeMonitorGracePeriod = 5 * time.Minute - // LowNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of LowUpdateSlowReaction WorkerLatencyProfile type - LowNotReadyTolerationSeconds = 60 - // LowUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of LowUpdateSlowReaction WorkerLatencyProfile type - LowUnreachableTolerationSeconds = 60 -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type NodeList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []Node `json:"items"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go deleted file mode 100644 index dce08a17..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_oauth.go +++ /dev/null @@ -1,598 +0,0 @@ -package v1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// OAuth Server and Identity Provider Config - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. -// It is used to configure the integrated OAuth server. -// This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=oauths,scope=Cluster -// +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type OAuth struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata"` - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec OAuthSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status OAuthStatus `json:"status"` -} - -// OAuthSpec contains desired cluster auth configuration -type OAuthSpec struct { - // identityProviders is an ordered list of ways for a user to identify themselves. - // When this list is empty, no identities are provisioned for users. 
- // +optional
- // +listType=atomic
- IdentityProviders []IdentityProvider `json:"identityProviders,omitempty"`
-
- // tokenConfig contains options for authorization and access tokens
- TokenConfig TokenConfig `json:"tokenConfig"`
-
- // templates allow you to customize pages like the login page.
- // +optional
- Templates OAuthTemplates `json:"templates"`
-}
-
-// OAuthStatus shows the current known state of the OAuth server in the cluster
-type OAuthStatus struct {
- // TODO Fill in with status of identityProviders and templates (and maybe tokenConfig)
-}
-
-// TokenConfig holds the necessary configuration options for authorization and access tokens
-type TokenConfig struct {
- // accessTokenMaxAgeSeconds defines the maximum age of access tokens
- AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds,omitempty"`
-
- // accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.
- // +optional
- AccessTokenInactivityTimeoutSeconds int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"`
-
- // accessTokenInactivityTimeout defines the token inactivity timeout
- // for tokens granted by any client.
- // The value represents the maximum amount of time that can occur between
- // consecutive uses of the token. Tokens become invalid if they are not
- // used within this temporal window. The user will need to acquire a new
- // token to regain access once a token times out. Takes valid time
- // duration string such as "5m", "1.5h" or "2h45m". The minimum allowed
- // value for duration is 300s (5 minutes). If the timeout is configured
- // per client, then that value takes precedence. If the timeout value is
- // not specified and the client does not override the value, then tokens
- // remain valid for their full lifetime.
- //
- // WARNING: existing tokens' timeout will not be affected (lowered) by changing this value
- // +optional
- AccessTokenInactivityTimeout *metav1.Duration `json:"accessTokenInactivityTimeout,omitempty"`
-}
-
-const (
- // LoginTemplateKey is the key of the login template in a secret
- LoginTemplateKey = "login.html"
-
- // ProviderSelectionTemplateKey is the key for the provider selection template in a secret
- ProviderSelectionTemplateKey = "providers.html"
-
- // ErrorsTemplateKey is the key for the errors template in a secret
- ErrorsTemplateKey = "errors.html"
-
- // BindPasswordKey is the key for the LDAP bind password in a secret
- BindPasswordKey = "bindPassword"
-
- // ClientSecretKey is the key for the oauth client secret data in a secret
- ClientSecretKey = "clientSecret"
-
- // HTPasswdDataKey is the key for the htpasswd file data in a secret
- HTPasswdDataKey = "htpasswd"
-)
-
-// OAuthTemplates allow for customization of pages like the login page
-type OAuthTemplates struct {
- // login is the name of a secret that specifies a go template to use to render the login page.
- // The key "login.html" is used to locate the template data.
- // If specified and the secret or expected key is not found, the default login page is used.
- // If the specified template is not valid, the default login page is used.
- // If unspecified, the default login page is used.
- // The namespace for this secret is openshift-config.
- // +optional
- Login SecretNameReference `json:"login"`
-
- // providerSelection is the name of a secret that specifies a go template to use to render
- // the provider selection page.
- // The key "providers.html" is used to locate the template data.
- // If specified and the secret or expected key is not found, the default provider selection page is used. - // If the specified template is not valid, the default provider selection page is used. - // If unspecified, the default provider selection page is used. - // The namespace for this secret is openshift-config. - // +optional - ProviderSelection SecretNameReference `json:"providerSelection"` - - // error is the name of a secret that specifies a go template to use to render error pages - // during the authentication or grant flow. - // The key "errors.html" is used to locate the template data. - // If specified and the secret or expected key is not found, the default error page is used. - // If the specified template is not valid, the default error page is used. - // If unspecified, the default error page is used. - // The namespace for this secret is openshift-config. - // +optional - Error SecretNameReference `json:"error"` -} - -// IdentityProvider provides identities for users authenticating using credentials -type IdentityProvider struct { - // name is used to qualify the identities returned by this provider. - // - It MUST be unique and not shared by any other identity provider used - // - It MUST be a valid path segment: name cannot equal "." or ".." or contain "/" or "%" or ":" - // Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName - Name string `json:"name"` - - // mappingMethod determines how identities from this provider are mapped to users - // Defaults to "claim" - // +optional - MappingMethod MappingMethodType `json:"mappingMethod,omitempty"` - - IdentityProviderConfig `json:",inline"` -} - -// MappingMethodType specifies how new identities should be mapped to users when they log in -type MappingMethodType string - -const ( - // MappingMethodClaim provisions a user with the identity’s preferred user name. Fails if a user - // with that user name is already mapped to another identity. - // Default. - MappingMethodClaim MappingMethodType = "claim" - - // MappingMethodLookup looks up existing users already mapped to an identity but does not - // automatically provision users or identities. Requires identities and users be set up - // manually or using an external process. - MappingMethodLookup MappingMethodType = "lookup" - - // MappingMethodAdd provisions a user with the identity’s preferred user name. If a user with - // that user name already exists, the identity is mapped to the existing user, adding to any - // existing identity mappings for the user. 
- MappingMethodAdd MappingMethodType = "add"
-)
-
-type IdentityProviderType string
-
-const (
- // IdentityProviderTypeBasicAuth provides identities for users authenticating with HTTP Basic Auth
- IdentityProviderTypeBasicAuth IdentityProviderType = "BasicAuth"
-
- // IdentityProviderTypeGitHub provides identities for users authenticating using GitHub credentials
- IdentityProviderTypeGitHub IdentityProviderType = "GitHub"
-
- // IdentityProviderTypeGitLab provides identities for users authenticating using GitLab credentials
- IdentityProviderTypeGitLab IdentityProviderType = "GitLab"
-
- // IdentityProviderTypeGoogle provides identities for users authenticating using Google credentials
- IdentityProviderTypeGoogle IdentityProviderType = "Google"
-
- // IdentityProviderTypeHTPasswd provides identities from an HTPasswd file
- IdentityProviderTypeHTPasswd IdentityProviderType = "HTPasswd"
-
- // IdentityProviderTypeKeystone provides identities for users authenticating using keystone password credentials
- IdentityProviderTypeKeystone IdentityProviderType = "Keystone"
-
- // IdentityProviderTypeLDAP provides identities for users authenticating using LDAP credentials
- IdentityProviderTypeLDAP IdentityProviderType = "LDAP"
-
- // IdentityProviderTypeOpenID provides identities for users authenticating using OpenID credentials
- IdentityProviderTypeOpenID IdentityProviderType = "OpenID"
-
- // IdentityProviderTypeRequestHeader provides identities for users authenticating using request header credentials
- IdentityProviderTypeRequestHeader IdentityProviderType = "RequestHeader"
-)
-
-// IdentityProviderConfig contains configuration for using a specific identity provider
-type IdentityProviderConfig struct {
- // type identifies the identity provider type for this entry.
- Type IdentityProviderType `json:"type"`
-
- // Provider-specific configuration
- // The json tag MUST match the `Type` specified above, case-insensitively
- // e.g. For `Type: "LDAP"`, the `ldap` configuration should be provided
-
- // basicAuth contains configuration options for the BasicAuth IdP
- // +optional
- BasicAuth *BasicAuthIdentityProvider `json:"basicAuth,omitempty"`
-
- // github enables user authentication using GitHub credentials
- // +optional
- GitHub *GitHubIdentityProvider `json:"github,omitempty"`
-
- // gitlab enables user authentication using GitLab credentials
- // +optional
- GitLab *GitLabIdentityProvider `json:"gitlab,omitempty"`
-
- // google enables user authentication using Google credentials
- // +optional
- Google *GoogleIdentityProvider `json:"google,omitempty"`
-
- // htpasswd enables user authentication using an HTPasswd file to validate credentials
- // +optional
- HTPasswd *HTPasswdIdentityProvider `json:"htpasswd,omitempty"`
-
- // keystone enables user authentication using keystone password credentials
- // +optional
- Keystone *KeystoneIdentityProvider `json:"keystone,omitempty"`
-
- // ldap enables user authentication using LDAP credentials
- // +optional
- LDAP *LDAPIdentityProvider `json:"ldap,omitempty"`
-
- // openID enables user authentication using OpenID credentials
- // +optional
- OpenID *OpenIDIdentityProvider `json:"openID,omitempty"`
-
- // requestHeader enables user authentication using request header credentials
- // +optional
- RequestHeader *RequestHeaderIdentityProvider `json:"requestHeader,omitempty"`
-}
-
-// BasicAuthIdentityProvider provides identities for users authenticating using HTTP basic auth credentials
-type BasicAuthIdentityProvider struct {
- // OAuthRemoteConnectionInfo contains information about how to connect to the external basic auth server
- OAuthRemoteConnectionInfo `json:",inline"`
-}
-
-// OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection
-type OAuthRemoteConnectionInfo struct {
- // url is the remote URL to connect to
- URL string `json:"url"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // The namespace for this config map is openshift-config.
- // +optional
- CA ConfigMapNameReference `json:"ca"`
-
- // tlsClientCert is an optional reference to a secret by name that contains the
- // PEM-encoded TLS client certificate to present when connecting to the server.
- // The key "tls.crt" is used to locate the data.
- // If specified and the secret or expected key is not found, the identity provider is not honored.
- // If the specified certificate data is not valid, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- // +optional
- TLSClientCert SecretNameReference `json:"tlsClientCert"`
-
- // tlsClientKey is an optional reference to a secret by name that contains the
- // PEM-encoded TLS private key for the client certificate referenced in tlsClientCert.
- // The key "tls.key" is used to locate the data.
- // If specified and the secret or expected key is not found, the identity provider is not honored.
- // If the specified certificate data is not valid, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- // +optional
- TLSClientKey SecretNameReference `json:"tlsClientKey"`
-}
-
-// HTPasswdIdentityProvider provides identities for users authenticating using htpasswd credentials
-type HTPasswdIdentityProvider struct {
- // fileData is a required reference to a secret by name containing the data to use as the htpasswd file.
- // The key "htpasswd" is used to locate the data.
- // If the secret or expected key is not found, the identity provider is not honored.
- // If the specified htpasswd data is not valid, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- FileData SecretNameReference `json:"fileData"`
-}
-
-// LDAPIdentityProvider provides identities for users authenticating using LDAP credentials
-type LDAPIdentityProvider struct {
- // url is an RFC 2255 URL which specifies the LDAP search parameters to use.
- // The syntax of the URL is:
- // ldap://host:port/basedn?attribute?scope?filter
- URL string `json:"url"`
-
- // bindDN is an optional DN to bind with during the search phase.
- // +optional
- BindDN string `json:"bindDN"`
-
- // bindPassword is an optional reference to a secret by name
- // containing a password to bind with during the search phase.
- // The key "bindPassword" is used to locate the data.
- // If specified and the secret or expected key is not found, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- // +optional
- BindPassword SecretNameReference `json:"bindPassword"`
-
- // insecure, if true, indicates the connection should not use TLS
- // WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always
- // attempt to connect using TLS, even when `insecure` is set to `true`
- // When `true`, "ldap://" URLS connect insecurely. When `false`, "ldap://" URLs are upgraded to
- // a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.
- Insecure bool `json:"insecure"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // The namespace for this config map is openshift-config.
- // +optional
- CA ConfigMapNameReference `json:"ca"`
-
- // attributes maps LDAP attributes to identities
- Attributes LDAPAttributeMapping `json:"attributes"`
-}
-
-// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields
-type LDAPAttributeMapping struct {
- // id is the list of attributes whose values should be used as the user ID. Required.
- // First non-empty attribute is used. At least one attribute is required. If none of the listed
- // attributes have a value, authentication fails.
- // LDAP standard identity attribute is "dn"
- ID []string `json:"id"`
-
- // preferredUsername is the list of attributes whose values should be used as the preferred username.
- // LDAP standard login attribute is "uid"
- // +optional
- PreferredUsername []string `json:"preferredUsername,omitempty"`
-
- // name is the list of attributes whose values should be used as the display name. Optional.
- // If unspecified, no display name is set for the identity
- // LDAP standard display name attribute is "cn"
- // +optional
- Name []string `json:"name,omitempty"`
-
- // email is the list of attributes whose values should be used as the email address. Optional.
- // If unspecified, no email is set for the identity
- // +optional
- Email []string `json:"email,omitempty"`
-}
-
-// KeystoneIdentityProvider provides identities for users authenticating using keystone password credentials
-type KeystoneIdentityProvider struct {
- // OAuthRemoteConnectionInfo contains information about how to connect to the keystone server
- OAuthRemoteConnectionInfo `json:",inline"`
-
- // domainName is required for keystone v3
- DomainName string `json:"domainName"`
-
- // TODO if we ever add support for 3.11 to 4.0 upgrades, add this configuration
- // useUsernameIdentity indicates that users should be authenticated by username, not keystone ID
- // DEPRECATED - only use this option for legacy systems to ensure backwards compatibility
- // +optional
- // UseUsernameIdentity bool `json:"useUsernameIdentity"`
-}
-
-// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials
-type RequestHeaderIdentityProvider struct {
- // loginURL is a URL to redirect unauthenticated /authorize requests to
- // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here
- // ${url} is replaced with the current URL, escaped to be safe in a query parameter
- // https://www.example.com/sso-login?then=${url}
- // ${query} is replaced with the current query string
- // https://www.example.com/auth-proxy/oauth/authorize?${query}
- // Required when login is set to true.
- LoginURL string `json:"loginURL"`
-
- // challengeURL is a URL to redirect unauthenticated /authorize requests to
- // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be
- // redirected here.
- // ${url} is replaced with the current URL, escaped to be safe in a query parameter
- // https://www.example.com/sso-login?then=${url}
- // ${query} is replaced with the current query string
- // https://www.example.com/auth-proxy/oauth/authorize?${query}
- // Required when challenge is set to true.
- ChallengeURL string `json:"challengeURL"`
-
- // ca is a required reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // Specifically, it allows verification of incoming requests to prevent header spoofing.
- // The key "ca.crt" is used to locate the data.
- // If the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // The namespace for this config map is openshift-config.
- ClientCA ConfigMapNameReference `json:"ca"`
-
- // clientCommonNames is an optional list of common names to require a match from. If empty, any
- // client certificate validated against the clientCA bundle is considered authoritative.
- // +optional
- ClientCommonNames []string `json:"clientCommonNames,omitempty"`
-
- // headers is the set of headers to check for identity information
- Headers []string `json:"headers"`
-
- // preferredUsernameHeaders is the set of headers to check for the preferred username
- PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"`
-
- // nameHeaders is the set of headers to check for the display name
- NameHeaders []string `json:"nameHeaders"`
-
- // emailHeaders is the set of headers to check for the email address
- EmailHeaders []string `json:"emailHeaders"`
-}
-
-// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials
-type GitHubIdentityProvider struct {
- // clientID is the oauth client ID
- ClientID string `json:"clientID"`
-
- // clientSecret is a required reference to the secret by name containing the oauth client secret.
- // The key "clientSecret" is used to locate the data.
- // If the secret or expected key is not found, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- ClientSecret SecretNameReference `json:"clientSecret"`
-
- // organizations optionally restricts which organizations are allowed to log in
- // +optional
- Organizations []string `json:"organizations,omitempty"`
-
- // teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.
- // +optional
- Teams []string `json:"teams,omitempty"`
-
- // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of
- // GitHub Enterprise.
- // It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.
- // +optional
- Hostname string `json:"hostname"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // This can only be configured when hostname is set to a non-empty value.
- // The namespace for this config map is openshift-config.
- // +optional
- CA ConfigMapNameReference `json:"ca"`
-}
-
-// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials
-type GitLabIdentityProvider struct {
- // clientID is the oauth client ID
- ClientID string `json:"clientID"`
-
- // clientSecret is a required reference to the secret by name containing the oauth client secret.
- // The key "clientSecret" is used to locate the data.
- // If the secret or expected key is not found, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- ClientSecret SecretNameReference `json:"clientSecret"`
-
- // url is the oauth server base URL
- URL string `json:"url"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // The namespace for this config map is openshift-config. - // +optional - CA ConfigMapNameReference `json:"ca"` -} - -// GoogleIdentityProvider provides identities for users authenticating using Google credentials -type GoogleIdentityProvider struct { - // clientID is the oauth client ID - ClientID string `json:"clientID"` - - // clientSecret is a required reference to the secret by name containing the oauth client secret. - // The key "clientSecret" is used to locate the data. - // If the secret or expected key is not found, the identity provider is not honored. - // The namespace for this secret is openshift-config. - ClientSecret SecretNameReference `json:"clientSecret"` - - // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to - // +optional - HostedDomain string `json:"hostedDomain"` -} - -// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials -type OpenIDIdentityProvider struct { - // clientID is the oauth client ID - ClientID string `json:"clientID"` - - // clientSecret is a required reference to the secret by name containing the oauth client secret. - // The key "clientSecret" is used to locate the data. - // If the secret or expected key is not found, the identity provider is not honored. - // The namespace for this secret is openshift-config. - ClientSecret SecretNameReference `json:"clientSecret"` - - // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. - // It is used as a trust anchor to validate the TLS certificate presented by the remote server. - // The key "ca.crt" is used to locate the data. - // If specified and the config map or expected key is not found, the identity provider is not honored. - // If the specified ca data is not valid, the identity provider is not honored. - // If empty, the default system roots are used. - // The namespace for this config map is openshift-config. - // +optional - CA ConfigMapNameReference `json:"ca"` - - // extraScopes are any scopes to request in addition to the standard "openid" scope. - // +optional - ExtraScopes []string `json:"extraScopes,omitempty"` - - // extraAuthorizeParameters are any custom parameters to add to the authorize request. - // +optional - ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters,omitempty"` - - // issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. - // It must use the https scheme with no query or fragment component. - Issuer string `json:"issuer"` - - // claims mappings - Claims OpenIDClaims `json:"claims"` -} - -// UserIDClaim is the claim used to provide a stable identifier for OIDC identities. -// Per http://openid.net/specs/openid-connect-core-1_0.html#ClaimStability -// -// "The sub (subject) and iss (issuer) Claims, used together, are the only Claims that an RP can -// rely upon as a stable identifier for the End-User, since the sub Claim MUST be locally unique -// and never reassigned within the Issuer for a particular End-User, as described in Section 2. -// Therefore, the only guaranteed unique identifier for a given End-User is the combination of the -// iss Claim and the sub Claim." 
-const UserIDClaim = "sub"
-
-// OpenIDClaim represents a claim retrieved from an OpenID provider's tokens or userInfo
-// responses
-// +kubebuilder:validation:MinLength=1
-type OpenIDClaim string
-
-// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider
-type OpenIDClaims struct {
- // preferredUsername is the list of claims whose values should be used as the preferred username.
- // If unspecified, the preferred username is determined from the value of the sub claim
- // +listType=atomic
- // +optional
- PreferredUsername []string `json:"preferredUsername,omitempty"`
-
- // name is the list of claims whose values should be used as the display name. Optional.
- // If unspecified, no display name is set for the identity
- // +listType=atomic
- // +optional
- Name []string `json:"name,omitempty"`
-
- // email is the list of claims whose values should be used as the email address. Optional.
- // If unspecified, no email is set for the identity
- // +listType=atomic
- // +optional
- Email []string `json:"email,omitempty"`
-
- // groups is the list of claims whose values should be used to synchronize groups
- // from the OIDC provider to OpenShift for the user.
- // If multiple claims are specified, the first one with a non-empty value is used.
- // +listType=atomic
- // +optional
- Groups []OpenIDClaim `json:"groups,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
-// +openshift:compatibility-gen:level=1
-type OAuthList struct {
- metav1.TypeMeta `json:",inline"`
-
- // metadata is the standard list's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- metav1.ListMeta `json:"metadata"`
-
- Items []OAuth `json:"items"`
-}
diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
deleted file mode 100644
index 1fddfa51..00000000
--- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// OperatorHubSpec defines the desired state of OperatorHub
-type OperatorHubSpec struct {
- // disableAllDefaultSources allows you to disable all the default hub
- // sources. If this is true, a specific entry in sources can be used to
- // enable a default source. If this is false, a specific entry in
- // sources can be used to disable or enable a default source.
- // +optional
- DisableAllDefaultSources bool `json:"disableAllDefaultSources,omitempty"`
- // sources is the list of default hub sources and their configuration.
- // If the list is empty, it implies that the default hub sources are
- // enabled on the cluster unless disableAllDefaultSources is true.
- // If disableAllDefaultSources is true and sources is not empty,
- // the configuration present in sources will take precedence. The list of
- // default hub sources and their current state will always be reflected in
- // the status block.
- // +optional
- Sources []HubSource `json:"sources,omitempty"`
-}
-
-// OperatorHubStatus defines the observed state of OperatorHub. The current
-// state of the default hub sources will always be reflected here.
-type OperatorHubStatus struct { - // sources encapsulates the result of applying the configuration for each - // hub source - Sources []HubSourceStatus `json:"sources,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OperatorHub is the Schema for the operatorhubs API. It can be used to change -// the state of the default hub sources for OperatorHub on the cluster from -// enabled to disabled and vice versa. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=operatorhubs,scope=Cluster -// +kubebuilder:subresource:status -// +genclient -// +genclient:nonNamespaced -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=marketplace,operatorOrdering=01 -// +openshift:capability=marketplace -// +openshift:compatibility-gen:level=1 -type OperatorHub struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata"` - - Spec OperatorHubSpec `json:"spec"` - Status OperatorHubStatus `json:"status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OperatorHubList contains a list of OperatorHub -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type OperatorHubList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - Items []OperatorHub `json:"items"` -} - -// HubSource is used to specify the hub source and its configuration -type HubSource struct { - // name is the name of one of the default hub sources - // +kubebuilder:validation:MaxLength=253 - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:Required - Name string `json:"name"` - // disabled is used to disable a default hub source on cluster - // +kubebuilder:Required - Disabled bool `json:"disabled"` -} - -// HubSourceStatus is used to reflect the current state of applying the -// configuration to a default source -type HubSourceStatus struct { - HubSource `json:",omitempty"` - // status indicates success or failure in applying the configuration - Status string `json:"status,omitempty"` - // message provides more information regarding failures - Message string `json:"message,omitempty"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go deleted file mode 100644 index 78fd3f41..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_project.go +++ /dev/null @@ -1,71 +0,0 @@ -package v1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Project holds cluster-wide information about Project. The canonical name is `cluster` -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
-// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=projects,scope=Cluster -// +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type Project struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec ProjectSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status ProjectStatus `json:"status"` -} - -// TemplateReference references a template in a specific namespace. -// The namespace must be specified at the point of use. -type TemplateReference struct { - // name is the metadata.name of the referenced project request template - Name string `json:"name"` -} - -// ProjectSpec holds the project creation configuration. -type ProjectSpec struct { - // projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint - // +optional - ProjectRequestMessage string `json:"projectRequestMessage"` - - // projectRequestTemplate is the template to use for creating projects in response to projectrequest. - // This must point to a template in 'openshift-config' namespace. It is optional. - // If it is not specified, a default template is used. - // - // +optional - ProjectRequestTemplate TemplateReference `json:"projectRequestTemplate"` -} - -type ProjectStatus struct { -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ProjectList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []Project `json:"items"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go deleted file mode 100644 index 2dfc66b1..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_proxy.go +++ /dev/null @@ -1,111 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster` -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
-// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=proxies,scope=Cluster -// +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type Proxy struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec holds user-settable values for the proxy configuration - // +kubebuilder:validation:Required - // +required - Spec ProxySpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status ProxyStatus `json:"status"` -} - -// ProxySpec contains cluster proxy creation configuration. -type ProxySpec struct { - // httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. - // +optional - HTTPProxy string `json:"httpProxy,omitempty"` - - // httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. - // +optional - HTTPSProxy string `json:"httpsProxy,omitempty"` - - // noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. - // Empty means unset and will not result in an env var. - // +optional - NoProxy string `json:"noProxy,omitempty"` - - // readinessEndpoints is a list of endpoints used to verify readiness of the proxy. - // +optional - ReadinessEndpoints []string `json:"readinessEndpoints,omitempty"` - - // trustedCA is a reference to a ConfigMap containing a CA certificate bundle. - // The trustedCA field should only be consumed by a proxy validator. The - // validator is responsible for reading the certificate bundle from the required - // key "ca-bundle.crt", merging it with the system default trust bundle, - // and writing the merged trust bundle to a ConfigMap named "trusted-ca-bundle" - // in the "openshift-config-managed" namespace. Clients that expect to make - // proxy connections must use the trusted-ca-bundle for all HTTPS requests to - // the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as - // well. - // - // The namespace for the ConfigMap referenced by trustedCA is - // "openshift-config". Here is an example ConfigMap (in yaml): - // - // apiVersion: v1 - // kind: ConfigMap - // metadata: - // name: user-ca-bundle - // namespace: openshift-config - // data: - // ca-bundle.crt: | - // -----BEGIN CERTIFICATE----- - // Custom CA certificate bundle. - // -----END CERTIFICATE----- - // - // +optional - TrustedCA ConfigMapNameReference `json:"trustedCA,omitempty"` -} - -// ProxyStatus shows current known state of the cluster proxy. -type ProxyStatus struct { - // httpProxy is the URL of the proxy for HTTP requests. - // +optional - HTTPProxy string `json:"httpProxy,omitempty"` - - // httpsProxy is the URL of the proxy for HTTPS requests. - // +optional - HTTPSProxy string `json:"httpsProxy,omitempty"` - - // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. 
- // +optional - NoProxy string `json:"noProxy,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type ProxyList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []Proxy `json:"items"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go deleted file mode 100644 index 2749f4f7..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go +++ /dev/null @@ -1,145 +0,0 @@ -package v1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Scheduler holds cluster-wide config information to run the Kubernetes Scheduler -// and influence its placement decisions. The canonical name for this config is `cluster`. -// -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=schedulers,scope=Cluster -// +kubebuilder:subresource:status -// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true -type Scheduler struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - // +required - Spec SchedulerSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status SchedulerStatus `json:"status"` -} - -type SchedulerSpec struct { - // DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. - // policy is a reference to a ConfigMap containing scheduler policy which has - // user specified predicates and priorities. If this ConfigMap is not available - // scheduler will default to use DefaultAlgorithmProvider. - // The namespace for this configmap is openshift-config. - // +optional - Policy ConfigMapNameReference `json:"policy,omitempty"` - // profile sets which scheduling profile should be set in order to configure scheduling - // decisions for new pods. - // - // Valid values are "LowNodeUtilization", "HighNodeUtilization", "NoScoring" - // Defaults to "LowNodeUtilization" - // +optional - Profile SchedulerProfile `json:"profile,omitempty"` - // profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles. 
- // +openshift:enable:FeatureGate=DynamicResourceAllocation - // +optional - ProfileCustomizations ProfileCustomizations `json:"profileCustomizations"` - // defaultNodeSelector helps set the cluster-wide default node selector to - // restrict pod placement to specific nodes. This is applied to the pods - // created in all namespaces and creates an intersection with any existing - // nodeSelectors already set on a pod, additionally constraining that pod's selector. - // For example, - // defaultNodeSelector: "type=user-node,region=east" would set nodeSelector - // field in pod spec to "type=user-node,region=east" to all pods created - // in all namespaces. Namespaces having project-wide node selectors won't be - // impacted even if this field is set. This adds an annotation section to - // the namespace. - // For example, if a new namespace is created with - // node-selector='type=user-node,region=east', - // the annotation openshift.io/node-selector: type=user-node,region=east - // gets added to the project. When the openshift.io/node-selector annotation - // is set on the project the value is used in preference to the value we are setting - // for defaultNodeSelector field. - // For instance, - // openshift.io/node-selector: "type=user-node,region=west" means - // that the default of "type=user-node,region=east" set in defaultNodeSelector - // would not be applied. - // +optional - DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"` - // MastersSchedulable allows masters nodes to be schedulable. When this flag is - // turned on, all the master nodes in the cluster will be made schedulable, - // so that workload pods can run on them. The default value for this field is false, - // meaning none of the master nodes are schedulable. - // Important Note: Once the workload pods start running on the master nodes, - // extreme care must be taken to ensure that cluster-critical control plane components - // are not impacted. - // Please turn on this field after doing due diligence. - // +optional - MastersSchedulable bool `json:"mastersSchedulable"` -} - -// +kubebuilder:validation:Enum="";LowNodeUtilization;HighNodeUtilization;NoScoring -type SchedulerProfile string - -var ( - // LowNodeUtililization is the default, and defines a scheduling profile which prefers to - // spread pods evenly among nodes targeting low resource consumption on each node. - LowNodeUtilization SchedulerProfile = "LowNodeUtilization" - - // HighNodeUtilization defines a scheduling profile which packs as many pods as possible onto - // as few nodes as possible targeting a small node count but high resource usage on each node. - HighNodeUtilization SchedulerProfile = "HighNodeUtilization" - - // NoScoring defines a scheduling profile which tries to provide lower-latency scheduling - // at the expense of potentially less optimal pod placement decisions. - NoScoring SchedulerProfile = "NoScoring" -) - -// ProfileCustomizations contains various parameters for modifying the default behavior of certain profiles -type ProfileCustomizations struct { - // dynamicResourceAllocation allows to enable or disable dynamic resource allocation within the scheduler. - // Dynamic resource allocation is an API for requesting and sharing resources between pods and containers inside a pod. - // Third-party resource drivers are responsible for tracking and allocating resources. - // Different kinds of resources support arbitrary parameters for defining requirements and initialization. 
- // Valid values are Enabled, Disabled and omitted. - // When omitted, this means no opinion and the platform is left to choose a reasonable default, - // which is subject to change over time. - // The current default is Disabled. - // +optional - DynamicResourceAllocation DRAEnablement `json:"dynamicResourceAllocation"` -} - -// +kubebuilder:validation:Enum:="";"Enabled";"Disabled" -type DRAEnablement string - -var ( - // DRAEnablementEnabled enables dynamic resource allocation feature - DRAEnablementEnabled DRAEnablement = "Enabled" - // DRAEnablementDisabled disables dynamic resource allocation feature - DRAEnablementDisabled DRAEnablement = "Disabled" -) - -type SchedulerStatus struct { -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +openshift:compatibility-gen:level=1 -type SchedulerList struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ListMeta `json:"metadata"` - - Items []Scheduler `json:"items"` -} diff --git a/vendor/github.com/openshift/api/config/v1/types_testreporting.go b/vendor/github.com/openshift/api/config/v1/types_testreporting.go deleted file mode 100644 index 4d642e06..00000000 --- a/vendor/github.com/openshift/api/config/v1/types_testreporting.go +++ /dev/null @@ -1,46 +0,0 @@ -package v1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// TestReporting is used for origin (and potentially others) to report the test names for a given FeatureGate into -// the payload for later analysis on a per-payload basis. -// This doesn't need any CRD because it's never stored in the cluster. -// -// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. -// +openshift:compatibility-gen:internal -type TestReporting struct { - metav1.TypeMeta `json:",inline"` - - // metadata is the standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +kubebuilder:validation:Required - // +required - Spec TestReportingSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status TestReportingStatus `json:"status"` -} - -type TestReportingSpec struct { - // TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing. - TestsForFeatureGates []FeatureGateTests `json:"testsForFeatureGates"` -} - -type FeatureGateTests struct { - // FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance. - FeatureGate string `json:"featureGate"` - - // Tests contains an item for every TestName - Tests []TestDetails `json:"tests"` -} - -type TestDetails struct { - // TestName is the name of the test as it appears in junit XMLs. - // It does not include the suite name since the same test can be executed in many suites. 
- TestName string `json:"testName"` -} - -type TestReportingStatus struct { -} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go deleted file mode 100644 index 9a81bc55..00000000 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,6034 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIServer) DeepCopyInto(out *APIServer) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer. -func (in *APIServer) DeepCopy() *APIServer { - if in == nil { - return nil - } - out := new(APIServer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *APIServer) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerEncryption. -func (in *APIServerEncryption) DeepCopy() *APIServerEncryption { - if in == nil { - return nil - } - out := new(APIServerEncryption) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIServerList) DeepCopyInto(out *APIServerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]APIServer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerList. -func (in *APIServerList) DeepCopy() *APIServerList { - if in == nil { - return nil - } - out := new(APIServerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *APIServerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIServerNamedServingCert) DeepCopyInto(out *APIServerNamedServingCert) { - *out = *in - if in.Names != nil { - in, out := &in.Names, &out.Names - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.ServingCertificate = in.ServingCertificate - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerNamedServingCert. 
-func (in *APIServerNamedServingCert) DeepCopy() *APIServerNamedServingCert { - if in == nil { - return nil - } - out := new(APIServerNamedServingCert) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIServerServingCerts) DeepCopyInto(out *APIServerServingCerts) { - *out = *in - if in.NamedCertificates != nil { - in, out := &in.NamedCertificates, &out.NamedCertificates - *out = make([]APIServerNamedServingCert, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerServingCerts. -func (in *APIServerServingCerts) DeepCopy() *APIServerServingCerts { - if in == nil { - return nil - } - out := new(APIServerServingCerts) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) { - *out = *in - in.ServingCerts.DeepCopyInto(&out.ServingCerts) - out.ClientCA = in.ClientCA - if in.AdditionalCORSAllowedOrigins != nil { - in, out := &in.AdditionalCORSAllowedOrigins, &out.AdditionalCORSAllowedOrigins - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.Encryption = in.Encryption - if in.TLSSecurityProfile != nil { - in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile - *out = new(TLSSecurityProfile) - (*in).DeepCopyInto(*out) - } - in.Audit.DeepCopyInto(&out.Audit) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerSpec. -func (in *APIServerSpec) DeepCopy() *APIServerSpec { - if in == nil { - return nil - } - out := new(APIServerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIServerStatus) DeepCopyInto(out *APIServerStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerStatus. -func (in *APIServerStatus) DeepCopy() *APIServerStatus { - if in == nil { - return nil - } - out := new(APIServerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSDNSSpec) DeepCopyInto(out *AWSDNSSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSDNSSpec. -func (in *AWSDNSSpec) DeepCopy() *AWSDNSSpec { - if in == nil { - return nil - } - out := new(AWSDNSSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSIngressSpec) DeepCopyInto(out *AWSIngressSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSIngressSpec. -func (in *AWSIngressSpec) DeepCopy() *AWSIngressSpec { - if in == nil { - return nil - } - out := new(AWSIngressSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) { - *out = *in - if in.ServiceEndpoints != nil { - in, out := &in.ServiceEndpoints, &out.ServiceEndpoints - *out = make([]AWSServiceEndpoint, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformSpec. -func (in *AWSPlatformSpec) DeepCopy() *AWSPlatformSpec { - if in == nil { - return nil - } - out := new(AWSPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) { - *out = *in - if in.ServiceEndpoints != nil { - in, out := &in.ServiceEndpoints, &out.ServiceEndpoints - *out = make([]AWSServiceEndpoint, len(*in)) - copy(*out, *in) - } - if in.ResourceTags != nil { - in, out := &in.ResourceTags, &out.ResourceTags - *out = make([]AWSResourceTag, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus. -func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus { - if in == nil { - return nil - } - out := new(AWSPlatformStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSResourceTag) DeepCopyInto(out *AWSResourceTag) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceTag. -func (in *AWSResourceTag) DeepCopy() *AWSResourceTag { - if in == nil { - return nil - } - out := new(AWSResourceTag) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSServiceEndpoint) DeepCopyInto(out *AWSServiceEndpoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSServiceEndpoint. -func (in *AWSServiceEndpoint) DeepCopy() *AWSServiceEndpoint { - if in == nil { - return nil - } - out := new(AWSServiceEndpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) { - *out = *in - if in.PluginConfig != nil { - in, out := &in.PluginConfig, &out.PluginConfig - *out = make(map[string]AdmissionPluginConfig, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.EnabledAdmissionPlugins != nil { - in, out := &in.EnabledAdmissionPlugins, &out.EnabledAdmissionPlugins - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DisabledAdmissionPlugins != nil { - in, out := &in.DisabledAdmissionPlugins, &out.DisabledAdmissionPlugins - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfig. -func (in *AdmissionConfig) DeepCopy() *AdmissionConfig { - if in == nil { - return nil - } - out := new(AdmissionConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AdmissionPluginConfig) DeepCopyInto(out *AdmissionPluginConfig) { - *out = *in - in.Configuration.DeepCopyInto(&out.Configuration) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfig. -func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig { - if in == nil { - return nil - } - out := new(AdmissionPluginConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlibabaCloudPlatformSpec) DeepCopyInto(out *AlibabaCloudPlatformSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformSpec. -func (in *AlibabaCloudPlatformSpec) DeepCopy() *AlibabaCloudPlatformSpec { - if in == nil { - return nil - } - out := new(AlibabaCloudPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlibabaCloudPlatformStatus) DeepCopyInto(out *AlibabaCloudPlatformStatus) { - *out = *in - if in.ResourceTags != nil { - in, out := &in.ResourceTags, &out.ResourceTags - *out = make([]AlibabaCloudResourceTag, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformStatus. -func (in *AlibabaCloudPlatformStatus) DeepCopy() *AlibabaCloudPlatformStatus { - if in == nil { - return nil - } - out := new(AlibabaCloudPlatformStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlibabaCloudResourceTag) DeepCopyInto(out *AlibabaCloudResourceTag) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudResourceTag. -func (in *AlibabaCloudResourceTag) DeepCopy() *AlibabaCloudResourceTag { - if in == nil { - return nil - } - out := new(AlibabaCloudResourceTag) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Audit) DeepCopyInto(out *Audit) { - *out = *in - if in.CustomRules != nil { - in, out := &in.CustomRules, &out.CustomRules - *out = make([]AuditCustomRule, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Audit. -func (in *Audit) DeepCopy() *Audit { - if in == nil { - return nil - } - out := new(Audit) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuditConfig) DeepCopyInto(out *AuditConfig) { - *out = *in - in.PolicyConfiguration.DeepCopyInto(&out.PolicyConfiguration) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig. -func (in *AuditConfig) DeepCopy() *AuditConfig { - if in == nil { - return nil - } - out := new(AuditConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AuditCustomRule) DeepCopyInto(out *AuditCustomRule) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditCustomRule. -func (in *AuditCustomRule) DeepCopy() *AuditCustomRule { - if in == nil { - return nil - } - out := new(AuditCustomRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Authentication) DeepCopyInto(out *Authentication) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. -func (in *Authentication) DeepCopy() *Authentication { - if in == nil { - return nil - } - out := new(Authentication) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Authentication) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Authentication, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList. -func (in *AuthenticationList) DeepCopy() *AuthenticationList { - if in == nil { - return nil - } - out := new(AuthenticationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AuthenticationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { - *out = *in - out.OAuthMetadata = in.OAuthMetadata - if in.WebhookTokenAuthenticators != nil { - in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators - *out = make([]DeprecatedWebhookTokenAuthenticator, len(*in)) - copy(*out, *in) - } - if in.WebhookTokenAuthenticator != nil { - in, out := &in.WebhookTokenAuthenticator, &out.WebhookTokenAuthenticator - *out = new(WebhookTokenAuthenticator) - **out = **in - } - if in.OIDCProviders != nil { - in, out := &in.OIDCProviders, &out.OIDCProviders - *out = make([]OIDCProvider, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. -func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { - if in == nil { - return nil - } - out := new(AuthenticationSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { - *out = *in - out.IntegratedOAuthMetadata = in.IntegratedOAuthMetadata - if in.OIDCClients != nil { - in, out := &in.OIDCClients, &out.OIDCClients - *out = make([]OIDCClientStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus. -func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { - if in == nil { - return nil - } - out := new(AuthenticationStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzurePlatformSpec) DeepCopyInto(out *AzurePlatformSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformSpec. -func (in *AzurePlatformSpec) DeepCopy() *AzurePlatformSpec { - if in == nil { - return nil - } - out := new(AzurePlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) { - *out = *in - if in.ResourceTags != nil { - in, out := &in.ResourceTags, &out.ResourceTags - *out = make([]AzureResourceTag, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformStatus. -func (in *AzurePlatformStatus) DeepCopy() *AzurePlatformStatus { - if in == nil { - return nil - } - out := new(AzurePlatformStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureResourceTag) DeepCopyInto(out *AzureResourceTag) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureResourceTag. -func (in *AzureResourceTag) DeepCopy() *AzureResourceTag { - if in == nil { - return nil - } - out := new(AzureResourceTag) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BareMetalPlatformLoadBalancer) DeepCopyInto(out *BareMetalPlatformLoadBalancer) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformLoadBalancer. -func (in *BareMetalPlatformLoadBalancer) DeepCopy() *BareMetalPlatformLoadBalancer { - if in == nil { - return nil - } - out := new(BareMetalPlatformLoadBalancer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BareMetalPlatformSpec) DeepCopyInto(out *BareMetalPlatformSpec) { - *out = *in - if in.APIServerInternalIPs != nil { - in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs - *out = make([]IP, len(*in)) - copy(*out, *in) - } - if in.IngressIPs != nil { - in, out := &in.IngressIPs, &out.IngressIPs - *out = make([]IP, len(*in)) - copy(*out, *in) - } - if in.MachineNetworks != nil { - in, out := &in.MachineNetworks, &out.MachineNetworks - *out = make([]CIDR, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformSpec. -func (in *BareMetalPlatformSpec) DeepCopy() *BareMetalPlatformSpec { - if in == nil { - return nil - } - out := new(BareMetalPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BareMetalPlatformStatus) DeepCopyInto(out *BareMetalPlatformStatus) { - *out = *in - if in.APIServerInternalIPs != nil { - in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IngressIPs != nil { - in, out := &in.IngressIPs, &out.IngressIPs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.LoadBalancer != nil { - in, out := &in.LoadBalancer, &out.LoadBalancer - *out = new(BareMetalPlatformLoadBalancer) - **out = **in - } - if in.MachineNetworks != nil { - in, out := &in.MachineNetworks, &out.MachineNetworks - *out = make([]CIDR, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformStatus. -func (in *BareMetalPlatformStatus) DeepCopy() *BareMetalPlatformStatus { - if in == nil { - return nil - } - out := new(BareMetalPlatformStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BasicAuthIdentityProvider) DeepCopyInto(out *BasicAuthIdentityProvider) { - *out = *in - out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthIdentityProvider. -func (in *BasicAuthIdentityProvider) DeepCopy() *BasicAuthIdentityProvider { - if in == nil { - return nil - } - out := new(BasicAuthIdentityProvider) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Build) DeepCopyInto(out *Build) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build. -func (in *Build) DeepCopy() *Build { - if in == nil { - return nil - } - out := new(Build) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Build) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BuildDefaults) DeepCopyInto(out *BuildDefaults) { - *out = *in - if in.DefaultProxy != nil { - in, out := &in.DefaultProxy, &out.DefaultProxy - *out = new(ProxySpec) - (*in).DeepCopyInto(*out) - } - if in.GitProxy != nil { - in, out := &in.GitProxy, &out.GitProxy - *out = new(ProxySpec) - (*in).DeepCopyInto(*out) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]corev1.EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ImageLabels != nil { - in, out := &in.ImageLabels, &out.ImageLabels - *out = make([]ImageLabel, len(*in)) - copy(*out, *in) - } - in.Resources.DeepCopyInto(&out.Resources) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaults. -func (in *BuildDefaults) DeepCopy() *BuildDefaults { - if in == nil { - return nil - } - out := new(BuildDefaults) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BuildList) DeepCopyInto(out *BuildList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Build, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList. -func (in *BuildList) DeepCopy() *BuildList { - if in == nil { - return nil - } - out := new(BuildList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BuildList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BuildOverrides) DeepCopyInto(out *BuildOverrides) { - *out = *in - if in.ImageLabels != nil { - in, out := &in.ImageLabels, &out.ImageLabels - *out = make([]ImageLabel, len(*in)) - copy(*out, *in) - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ForcePull != nil { - in, out := &in.ForcePull, &out.ForcePull - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverrides. -func (in *BuildOverrides) DeepCopy() *BuildOverrides { - if in == nil { - return nil - } - out := new(BuildOverrides) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { - *out = *in - out.AdditionalTrustedCA = in.AdditionalTrustedCA - in.BuildDefaults.DeepCopyInto(&out.BuildDefaults) - in.BuildOverrides.DeepCopyInto(&out.BuildOverrides) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec. 
-func (in *BuildSpec) DeepCopy() *BuildSpec { - if in == nil { - return nil - } - out := new(BuildSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CertInfo) DeepCopyInto(out *CertInfo) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertInfo. -func (in *CertInfo) DeepCopy() *CertInfo { - if in == nil { - return nil - } - out := new(CertInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClientConnectionOverrides) DeepCopyInto(out *ClientConnectionOverrides) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionOverrides. -func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides { - if in == nil { - return nil - } - out := new(ClientConnectionOverrides) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CloudControllerManagerStatus) DeepCopyInto(out *CloudControllerManagerStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudControllerManagerStatus. -func (in *CloudControllerManagerStatus) DeepCopy() *CloudControllerManagerStatus { - if in == nil { - return nil - } - out := new(CloudControllerManagerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CloudLoadBalancerConfig) DeepCopyInto(out *CloudLoadBalancerConfig) { - *out = *in - if in.ClusterHosted != nil { - in, out := &in.ClusterHosted, &out.ClusterHosted - *out = new(CloudLoadBalancerIPs) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudLoadBalancerConfig. -func (in *CloudLoadBalancerConfig) DeepCopy() *CloudLoadBalancerConfig { - if in == nil { - return nil - } - out := new(CloudLoadBalancerConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CloudLoadBalancerIPs) DeepCopyInto(out *CloudLoadBalancerIPs) { - *out = *in - if in.APIIntLoadBalancerIPs != nil { - in, out := &in.APIIntLoadBalancerIPs, &out.APIIntLoadBalancerIPs - *out = make([]IP, len(*in)) - copy(*out, *in) - } - if in.APILoadBalancerIPs != nil { - in, out := &in.APILoadBalancerIPs, &out.APILoadBalancerIPs - *out = make([]IP, len(*in)) - copy(*out, *in) - } - if in.IngressLoadBalancerIPs != nil { - in, out := &in.IngressLoadBalancerIPs, &out.IngressLoadBalancerIPs - *out = make([]IP, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudLoadBalancerIPs. -func (in *CloudLoadBalancerIPs) DeepCopy() *CloudLoadBalancerIPs { - if in == nil { - return nil - } - out := new(CloudLoadBalancerIPs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { - *out = *in - if in.PromQL != nil { - in, out := &in.PromQL, &out.PromQL - *out = new(PromQLClusterCondition) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. -func (in *ClusterCondition) DeepCopy() *ClusterCondition { - if in == nil { - return nil - } - out := new(ClusterCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. -func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { - if in == nil { - return nil - } - out := new(ClusterNetworkEntry) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterOperator) DeepCopyInto(out *ClusterOperator) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperator. -func (in *ClusterOperator) DeepCopy() *ClusterOperator { - if in == nil { - return nil - } - out := new(ClusterOperator) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterOperator) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterOperatorList) DeepCopyInto(out *ClusterOperatorList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterOperator, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorList. -func (in *ClusterOperatorList) DeepCopy() *ClusterOperatorList { - if in == nil { - return nil - } - out := new(ClusterOperatorList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterOperatorList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterOperatorSpec) DeepCopyInto(out *ClusterOperatorSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorSpec. -func (in *ClusterOperatorSpec) DeepCopy() *ClusterOperatorSpec { - if in == nil { - return nil - } - out := new(ClusterOperatorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterOperatorStatus) DeepCopyInto(out *ClusterOperatorStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ClusterOperatorStatusCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]OperandVersion, len(*in)) - copy(*out, *in) - } - if in.RelatedObjects != nil { - in, out := &in.RelatedObjects, &out.RelatedObjects - *out = make([]ObjectReference, len(*in)) - copy(*out, *in) - } - in.Extension.DeepCopyInto(&out.Extension) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatus. -func (in *ClusterOperatorStatus) DeepCopy() *ClusterOperatorStatus { - if in == nil { - return nil - } - out := new(ClusterOperatorStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterOperatorStatusCondition) DeepCopyInto(out *ClusterOperatorStatusCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatusCondition. -func (in *ClusterOperatorStatusCondition) DeepCopy() *ClusterOperatorStatusCondition { - if in == nil { - return nil - } - out := new(ClusterOperatorStatusCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterVersion) DeepCopyInto(out *ClusterVersion) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersion. -func (in *ClusterVersion) DeepCopy() *ClusterVersion { - if in == nil { - return nil - } - out := new(ClusterVersion) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterVersion) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterVersionCapabilitiesSpec) DeepCopyInto(out *ClusterVersionCapabilitiesSpec) { - *out = *in - if in.AdditionalEnabledCapabilities != nil { - in, out := &in.AdditionalEnabledCapabilities, &out.AdditionalEnabledCapabilities - *out = make([]ClusterVersionCapability, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionCapabilitiesSpec. -func (in *ClusterVersionCapabilitiesSpec) DeepCopy() *ClusterVersionCapabilitiesSpec { - if in == nil { - return nil - } - out := new(ClusterVersionCapabilitiesSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterVersionCapabilitiesStatus) DeepCopyInto(out *ClusterVersionCapabilitiesStatus) { - *out = *in - if in.EnabledCapabilities != nil { - in, out := &in.EnabledCapabilities, &out.EnabledCapabilities - *out = make([]ClusterVersionCapability, len(*in)) - copy(*out, *in) - } - if in.KnownCapabilities != nil { - in, out := &in.KnownCapabilities, &out.KnownCapabilities - *out = make([]ClusterVersionCapability, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionCapabilitiesStatus. -func (in *ClusterVersionCapabilitiesStatus) DeepCopy() *ClusterVersionCapabilitiesStatus { - if in == nil { - return nil - } - out := new(ClusterVersionCapabilitiesStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterVersionList) DeepCopyInto(out *ClusterVersionList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterVersion, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionList. -func (in *ClusterVersionList) DeepCopy() *ClusterVersionList { - if in == nil { - return nil - } - out := new(ClusterVersionList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterVersionList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterVersionSpec) DeepCopyInto(out *ClusterVersionSpec) { - *out = *in - if in.DesiredUpdate != nil { - in, out := &in.DesiredUpdate, &out.DesiredUpdate - *out = new(Update) - **out = **in - } - if in.Capabilities != nil { - in, out := &in.Capabilities, &out.Capabilities - *out = new(ClusterVersionCapabilitiesSpec) - (*in).DeepCopyInto(*out) - } - if in.SignatureStores != nil { - in, out := &in.SignatureStores, &out.SignatureStores - *out = make([]SignatureStore, len(*in)) - copy(*out, *in) - } - if in.Overrides != nil { - in, out := &in.Overrides, &out.Overrides - *out = make([]ComponentOverride, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionSpec. -func (in *ClusterVersionSpec) DeepCopy() *ClusterVersionSpec { - if in == nil { - return nil - } - out := new(ClusterVersionSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) { - *out = *in - in.Desired.DeepCopyInto(&out.Desired) - if in.History != nil { - in, out := &in.History, &out.History - *out = make([]UpdateHistory, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Capabilities.DeepCopyInto(&out.Capabilities) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ClusterOperatorStatusCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AvailableUpdates != nil { - in, out := &in.AvailableUpdates, &out.AvailableUpdates - *out = make([]Release, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ConditionalUpdates != nil { - in, out := &in.ConditionalUpdates, &out.ConditionalUpdates - *out = make([]ConditionalUpdate, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus. -func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus { - if in == nil { - return nil - } - out := new(ClusterVersionStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentOverride) DeepCopyInto(out *ComponentOverride) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentOverride. -func (in *ComponentOverride) DeepCopy() *ComponentOverride { - if in == nil { - return nil - } - out := new(ComponentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentRouteSpec) DeepCopyInto(out *ComponentRouteSpec) { - *out = *in - out.ServingCertKeyPairSecret = in.ServingCertKeyPairSecret - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteSpec. -func (in *ComponentRouteSpec) DeepCopy() *ComponentRouteSpec { - if in == nil { - return nil - } - out := new(ComponentRouteSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentRouteStatus) DeepCopyInto(out *ComponentRouteStatus) { - *out = *in - if in.ConsumingUsers != nil { - in, out := &in.ConsumingUsers, &out.ConsumingUsers - *out = make([]ConsumingUser, len(*in)) - copy(*out, *in) - } - if in.CurrentHostnames != nil { - in, out := &in.CurrentHostnames, &out.CurrentHostnames - *out = make([]Hostname, len(*in)) - copy(*out, *in) - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.RelatedObjects != nil { - in, out := &in.RelatedObjects, &out.RelatedObjects - *out = make([]ObjectReference, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteStatus. -func (in *ComponentRouteStatus) DeepCopy() *ComponentRouteStatus { - if in == nil { - return nil - } - out := new(ComponentRouteStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ConditionalUpdate) DeepCopyInto(out *ConditionalUpdate) { - *out = *in - in.Release.DeepCopyInto(&out.Release) - if in.Risks != nil { - in, out := &in.Risks, &out.Risks - *out = make([]ConditionalUpdateRisk, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdate. -func (in *ConditionalUpdate) DeepCopy() *ConditionalUpdate { - if in == nil { - return nil - } - out := new(ConditionalUpdate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConditionalUpdateRisk) DeepCopyInto(out *ConditionalUpdateRisk) { - *out = *in - if in.MatchingRules != nil { - in, out := &in.MatchingRules, &out.MatchingRules - *out = make([]ClusterCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdateRisk. -func (in *ConditionalUpdateRisk) DeepCopy() *ConditionalUpdateRisk { - if in == nil { - return nil - } - out := new(ConditionalUpdateRisk) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapFileReference. -func (in *ConfigMapFileReference) DeepCopy() *ConfigMapFileReference { - if in == nil { - return nil - } - out := new(ConfigMapFileReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapNameReference) DeepCopyInto(out *ConfigMapNameReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNameReference. -func (in *ConfigMapNameReference) DeepCopy() *ConfigMapNameReference { - if in == nil { - return nil - } - out := new(ConfigMapNameReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Console) DeepCopyInto(out *Console) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console. -func (in *Console) DeepCopy() *Console { - if in == nil { - return nil - } - out := new(Console) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Console) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ConsoleAuthentication) DeepCopyInto(out *ConsoleAuthentication) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleAuthentication. -func (in *ConsoleAuthentication) DeepCopy() *ConsoleAuthentication { - if in == nil { - return nil - } - out := new(ConsoleAuthentication) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConsoleList) DeepCopyInto(out *ConsoleList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Console, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList. -func (in *ConsoleList) DeepCopy() *ConsoleList { - if in == nil { - return nil - } - out := new(ConsoleList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConsoleList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) { - *out = *in - out.Authentication = in.Authentication - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec. -func (in *ConsoleSpec) DeepCopy() *ConsoleSpec { - if in == nil { - return nil - } - out := new(ConsoleSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus. -func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { - if in == nil { - return nil - } - out := new(ConsoleStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomFeatureGates) DeepCopyInto(out *CustomFeatureGates) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]FeatureGateName, len(*in)) - copy(*out, *in) - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]FeatureGateName, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFeatureGates. -func (in *CustomFeatureGates) DeepCopy() *CustomFeatureGates { - if in == nil { - return nil - } - out := new(CustomFeatureGates) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) { - *out = *in - in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile. 
-func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile { - if in == nil { - return nil - } - out := new(CustomTLSProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNS) DeepCopyInto(out *DNS) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. -func (in *DNS) DeepCopy() *DNS { - if in == nil { - return nil - } - out := new(DNS) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DNS) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNSList) DeepCopyInto(out *DNSList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DNS, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList. -func (in *DNSList) DeepCopy() *DNSList { - if in == nil { - return nil - } - out := new(DNSList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DNSList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNSPlatformSpec) DeepCopyInto(out *DNSPlatformSpec) { - *out = *in - if in.AWS != nil { - in, out := &in.AWS, &out.AWS - *out = new(AWSDNSSpec) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSPlatformSpec. -func (in *DNSPlatformSpec) DeepCopy() *DNSPlatformSpec { - if in == nil { - return nil - } - out := new(DNSPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { - *out = *in - if in.PublicZone != nil { - in, out := &in.PublicZone, &out.PublicZone - *out = new(DNSZone) - (*in).DeepCopyInto(*out) - } - if in.PrivateZone != nil { - in, out := &in.PrivateZone, &out.PrivateZone - *out = new(DNSZone) - (*in).DeepCopyInto(*out) - } - in.Platform.DeepCopyInto(&out.Platform) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec. -func (in *DNSSpec) DeepCopy() *DNSSpec { - if in == nil { - return nil - } - out := new(DNSSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNSStatus) DeepCopyInto(out *DNSStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus. 
-func (in *DNSStatus) DeepCopy() *DNSStatus { - if in == nil { - return nil - } - out := new(DNSStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNSZone) DeepCopyInto(out *DNSZone) { - *out = *in - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZone. -func (in *DNSZone) DeepCopy() *DNSZone { - if in == nil { - return nil - } - out := new(DNSZone) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthentication. -func (in *DelegatedAuthentication) DeepCopy() *DelegatedAuthentication { - if in == nil { - return nil - } - out := new(DelegatedAuthentication) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DelegatedAuthorization) DeepCopyInto(out *DelegatedAuthorization) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthorization. -func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization { - if in == nil { - return nil - } - out := new(DelegatedAuthorization) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeprecatedWebhookTokenAuthenticator) DeepCopyInto(out *DeprecatedWebhookTokenAuthenticator) { - *out = *in - out.KubeConfig = in.KubeConfig - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecatedWebhookTokenAuthenticator. -func (in *DeprecatedWebhookTokenAuthenticator) DeepCopy() *DeprecatedWebhookTokenAuthenticator { - if in == nil { - return nil - } - out := new(DeprecatedWebhookTokenAuthenticator) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EquinixMetalPlatformSpec) DeepCopyInto(out *EquinixMetalPlatformSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformSpec. -func (in *EquinixMetalPlatformSpec) DeepCopy() *EquinixMetalPlatformSpec { - if in == nil { - return nil - } - out := new(EquinixMetalPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EquinixMetalPlatformStatus) DeepCopyInto(out *EquinixMetalPlatformStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformStatus. 
-func (in *EquinixMetalPlatformStatus) DeepCopy() *EquinixMetalPlatformStatus { - if in == nil { - return nil - } - out := new(EquinixMetalPlatformStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) { - *out = *in - if in.URLs != nil { - in, out := &in.URLs, &out.URLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.CertInfo = in.CertInfo - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo. -func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo { - if in == nil { - return nil - } - out := new(EtcdConnectionInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) { - *out = *in - in.EtcdConnectionInfo.DeepCopyInto(&out.EtcdConnectionInfo) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig. -func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig { - if in == nil { - return nil - } - out := new(EtcdStorageConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalIPConfig) DeepCopyInto(out *ExternalIPConfig) { - *out = *in - if in.Policy != nil { - in, out := &in.Policy, &out.Policy - *out = new(ExternalIPPolicy) - (*in).DeepCopyInto(*out) - } - if in.AutoAssignCIDRs != nil { - in, out := &in.AutoAssignCIDRs, &out.AutoAssignCIDRs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPConfig. -func (in *ExternalIPConfig) DeepCopy() *ExternalIPConfig { - if in == nil { - return nil - } - out := new(ExternalIPConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalIPPolicy) DeepCopyInto(out *ExternalIPPolicy) { - *out = *in - if in.AllowedCIDRs != nil { - in, out := &in.AllowedCIDRs, &out.AllowedCIDRs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.RejectedCIDRs != nil { - in, out := &in.RejectedCIDRs, &out.RejectedCIDRs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPPolicy. -func (in *ExternalIPPolicy) DeepCopy() *ExternalIPPolicy { - if in == nil { - return nil - } - out := new(ExternalIPPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalPlatformSpec) DeepCopyInto(out *ExternalPlatformSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalPlatformSpec. -func (in *ExternalPlatformSpec) DeepCopy() *ExternalPlatformSpec { - if in == nil { - return nil - } - out := new(ExternalPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExternalPlatformStatus) DeepCopyInto(out *ExternalPlatformStatus) { - *out = *in - out.CloudControllerManager = in.CloudControllerManager - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalPlatformStatus. -func (in *ExternalPlatformStatus) DeepCopy() *ExternalPlatformStatus { - if in == nil { - return nil - } - out := new(ExternalPlatformStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureGate) DeepCopyInto(out *FeatureGate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGate. -func (in *FeatureGate) DeepCopy() *FeatureGate { - if in == nil { - return nil - } - out := new(FeatureGate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FeatureGate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureGateAttributes) DeepCopyInto(out *FeatureGateAttributes) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateAttributes. -func (in *FeatureGateAttributes) DeepCopy() *FeatureGateAttributes { - if in == nil { - return nil - } - out := new(FeatureGateAttributes) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureGateDetails) DeepCopyInto(out *FeatureGateDetails) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]FeatureGateAttributes, len(*in)) - copy(*out, *in) - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]FeatureGateAttributes, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateDetails. -func (in *FeatureGateDetails) DeepCopy() *FeatureGateDetails { - if in == nil { - return nil - } - out := new(FeatureGateDetails) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]FeatureGate, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateList. -func (in *FeatureGateList) DeepCopy() *FeatureGateList { - if in == nil { - return nil - } - out := new(FeatureGateList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *FeatureGateList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureGateSelection) DeepCopyInto(out *FeatureGateSelection) { - *out = *in - if in.CustomNoUpgrade != nil { - in, out := &in.CustomNoUpgrade, &out.CustomNoUpgrade - *out = new(CustomFeatureGates) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSelection. -func (in *FeatureGateSelection) DeepCopy() *FeatureGateSelection { - if in == nil { - return nil - } - out := new(FeatureGateSelection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureGateSpec) DeepCopyInto(out *FeatureGateSpec) { - *out = *in - in.FeatureGateSelection.DeepCopyInto(&out.FeatureGateSelection) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSpec. -func (in *FeatureGateSpec) DeepCopy() *FeatureGateSpec { - if in == nil { - return nil - } - out := new(FeatureGateSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureGateStatus) DeepCopyInto(out *FeatureGateStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FeatureGates != nil { - in, out := &in.FeatureGates, &out.FeatureGates - *out = make([]FeatureGateDetails, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateStatus. -func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus { - if in == nil { - return nil - } - out := new(FeatureGateStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureGateTests) DeepCopyInto(out *FeatureGateTests) { - *out = *in - if in.Tests != nil { - in, out := &in.Tests, &out.Tests - *out = make([]TestDetails, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateTests. -func (in *FeatureGateTests) DeepCopy() *FeatureGateTests { - if in == nil { - return nil - } - out := new(FeatureGateTests) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPPlatformSpec) DeepCopyInto(out *GCPPlatformSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformSpec. -func (in *GCPPlatformSpec) DeepCopy() *GCPPlatformSpec { - if in == nil { - return nil - } - out := new(GCPPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) { - *out = *in - if in.ResourceLabels != nil { - in, out := &in.ResourceLabels, &out.ResourceLabels - *out = make([]GCPResourceLabel, len(*in)) - copy(*out, *in) - } - if in.ResourceTags != nil { - in, out := &in.ResourceTags, &out.ResourceTags - *out = make([]GCPResourceTag, len(*in)) - copy(*out, *in) - } - if in.CloudLoadBalancerConfig != nil { - in, out := &in.CloudLoadBalancerConfig, &out.CloudLoadBalancerConfig - *out = new(CloudLoadBalancerConfig) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformStatus. -func (in *GCPPlatformStatus) DeepCopy() *GCPPlatformStatus { - if in == nil { - return nil - } - out := new(GCPPlatformStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPResourceLabel) DeepCopyInto(out *GCPResourceLabel) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPResourceLabel. -func (in *GCPResourceLabel) DeepCopy() *GCPResourceLabel { - if in == nil { - return nil - } - out := new(GCPResourceLabel) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPResourceTag) DeepCopyInto(out *GCPResourceTag) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPResourceTag. -func (in *GCPResourceTag) DeepCopy() *GCPResourceTag { - if in == nil { - return nil - } - out := new(GCPResourceTag) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) { - *out = *in - in.ServingInfo.DeepCopyInto(&out.ServingInfo) - if in.CORSAllowedOrigins != nil { - in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.AuditConfig.DeepCopyInto(&out.AuditConfig) - in.StorageConfig.DeepCopyInto(&out.StorageConfig) - in.AdmissionConfig.DeepCopyInto(&out.AdmissionConfig) - out.KubeClientConfig = in.KubeClientConfig - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericAPIServerConfig. -func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig { - if in == nil { - return nil - } - out := new(GenericAPIServerConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericControllerConfig) DeepCopyInto(out *GenericControllerConfig) { - *out = *in - in.ServingInfo.DeepCopyInto(&out.ServingInfo) - out.LeaderElection = in.LeaderElection - out.Authentication = in.Authentication - out.Authorization = in.Authorization - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericControllerConfig. -func (in *GenericControllerConfig) DeepCopy() *GenericControllerConfig { - if in == nil { - return nil - } - out := new(GenericControllerConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) { - *out = *in - out.ClientSecret = in.ClientSecret - if in.Organizations != nil { - in, out := &in.Organizations, &out.Organizations - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Teams != nil { - in, out := &in.Teams, &out.Teams - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.CA = in.CA - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider. -func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider { - if in == nil { - return nil - } - out := new(GitHubIdentityProvider) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) { - *out = *in - out.ClientSecret = in.ClientSecret - out.CA = in.CA - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider. -func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider { - if in == nil { - return nil - } - out := new(GitLabIdentityProvider) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) { - *out = *in - out.ClientSecret = in.ClientSecret - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider. -func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider { - if in == nil { - return nil - } - out := new(GoogleIdentityProvider) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTPasswdIdentityProvider) DeepCopyInto(out *HTPasswdIdentityProvider) { - *out = *in - out.FileData = in.FileData - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdIdentityProvider. -func (in *HTPasswdIdentityProvider) DeepCopy() *HTPasswdIdentityProvider { - if in == nil { - return nil - } - out := new(HTPasswdIdentityProvider) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) { - *out = *in - in.ServingInfo.DeepCopyInto(&out.ServingInfo) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo. -func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo { - if in == nil { - return nil - } - out := new(HTTPServingInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HubSource) DeepCopyInto(out *HubSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSource. -func (in *HubSource) DeepCopy() *HubSource { - if in == nil { - return nil - } - out := new(HubSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HubSourceStatus) DeepCopyInto(out *HubSourceStatus) { - *out = *in - out.HubSource = in.HubSource - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSourceStatus. -func (in *HubSourceStatus) DeepCopy() *HubSourceStatus { - if in == nil { - return nil - } - out := new(HubSourceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IBMCloudPlatformSpec) DeepCopyInto(out *IBMCloudPlatformSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformSpec. -func (in *IBMCloudPlatformSpec) DeepCopy() *IBMCloudPlatformSpec { - if in == nil { - return nil - } - out := new(IBMCloudPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IBMCloudPlatformStatus) DeepCopyInto(out *IBMCloudPlatformStatus) { - *out = *in - if in.ServiceEndpoints != nil { - in, out := &in.ServiceEndpoints, &out.ServiceEndpoints - *out = make([]IBMCloudServiceEndpoint, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformStatus. -func (in *IBMCloudPlatformStatus) DeepCopy() *IBMCloudPlatformStatus { - if in == nil { - return nil - } - out := new(IBMCloudPlatformStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IBMCloudServiceEndpoint) DeepCopyInto(out *IBMCloudServiceEndpoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudServiceEndpoint. -func (in *IBMCloudServiceEndpoint) DeepCopy() *IBMCloudServiceEndpoint { - if in == nil { - return nil - } - out := new(IBMCloudServiceEndpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) { - *out = *in - in.IdentityProviderConfig.DeepCopyInto(&out.IdentityProviderConfig) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider. -func (in *IdentityProvider) DeepCopy() *IdentityProvider { - if in == nil { - return nil - } - out := new(IdentityProvider) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IdentityProviderConfig) DeepCopyInto(out *IdentityProviderConfig) { - *out = *in - if in.BasicAuth != nil { - in, out := &in.BasicAuth, &out.BasicAuth - *out = new(BasicAuthIdentityProvider) - **out = **in - } - if in.GitHub != nil { - in, out := &in.GitHub, &out.GitHub - *out = new(GitHubIdentityProvider) - (*in).DeepCopyInto(*out) - } - if in.GitLab != nil { - in, out := &in.GitLab, &out.GitLab - *out = new(GitLabIdentityProvider) - **out = **in - } - if in.Google != nil { - in, out := &in.Google, &out.Google - *out = new(GoogleIdentityProvider) - **out = **in - } - if in.HTPasswd != nil { - in, out := &in.HTPasswd, &out.HTPasswd - *out = new(HTPasswdIdentityProvider) - **out = **in - } - if in.Keystone != nil { - in, out := &in.Keystone, &out.Keystone - *out = new(KeystoneIdentityProvider) - **out = **in - } - if in.LDAP != nil { - in, out := &in.LDAP, &out.LDAP - *out = new(LDAPIdentityProvider) - (*in).DeepCopyInto(*out) - } - if in.OpenID != nil { - in, out := &in.OpenID, &out.OpenID - *out = new(OpenIDIdentityProvider) - (*in).DeepCopyInto(*out) - } - if in.RequestHeader != nil { - in, out := &in.RequestHeader, &out.RequestHeader - *out = new(RequestHeaderIdentityProvider) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfig. -func (in *IdentityProviderConfig) DeepCopy() *IdentityProviderConfig { - if in == nil { - return nil - } - out := new(IdentityProviderConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Image) DeepCopyInto(out *Image) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. -func (in *Image) DeepCopy() *Image { - if in == nil { - return nil - } - out := new(Image) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Image) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageContentPolicy) DeepCopyInto(out *ImageContentPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicy. -func (in *ImageContentPolicy) DeepCopy() *ImageContentPolicy { - if in == nil { - return nil - } - out := new(ImageContentPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageContentPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ImageContentPolicyList) DeepCopyInto(out *ImageContentPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ImageContentPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicyList. -func (in *ImageContentPolicyList) DeepCopy() *ImageContentPolicyList { - if in == nil { - return nil - } - out := new(ImageContentPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageContentPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageContentPolicySpec) DeepCopyInto(out *ImageContentPolicySpec) { - *out = *in - if in.RepositoryDigestMirrors != nil { - in, out := &in.RepositoryDigestMirrors, &out.RepositoryDigestMirrors - *out = make([]RepositoryDigestMirrors, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicySpec. -func (in *ImageContentPolicySpec) DeepCopy() *ImageContentPolicySpec { - if in == nil { - return nil - } - out := new(ImageContentPolicySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageDigestMirrorSet) DeepCopyInto(out *ImageDigestMirrorSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSet. -func (in *ImageDigestMirrorSet) DeepCopy() *ImageDigestMirrorSet { - if in == nil { - return nil - } - out := new(ImageDigestMirrorSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageDigestMirrorSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageDigestMirrorSetList) DeepCopyInto(out *ImageDigestMirrorSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ImageDigestMirrorSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetList. -func (in *ImageDigestMirrorSetList) DeepCopy() *ImageDigestMirrorSetList { - if in == nil { - return nil - } - out := new(ImageDigestMirrorSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ImageDigestMirrorSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageDigestMirrorSetSpec) DeepCopyInto(out *ImageDigestMirrorSetSpec) { - *out = *in - if in.ImageDigestMirrors != nil { - in, out := &in.ImageDigestMirrors, &out.ImageDigestMirrors - *out = make([]ImageDigestMirrors, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetSpec. -func (in *ImageDigestMirrorSetSpec) DeepCopy() *ImageDigestMirrorSetSpec { - if in == nil { - return nil - } - out := new(ImageDigestMirrorSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageDigestMirrorSetStatus) DeepCopyInto(out *ImageDigestMirrorSetStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetStatus. -func (in *ImageDigestMirrorSetStatus) DeepCopy() *ImageDigestMirrorSetStatus { - if in == nil { - return nil - } - out := new(ImageDigestMirrorSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageDigestMirrors) DeepCopyInto(out *ImageDigestMirrors) { - *out = *in - if in.Mirrors != nil { - in, out := &in.Mirrors, &out.Mirrors - *out = make([]ImageMirror, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrors. -func (in *ImageDigestMirrors) DeepCopy() *ImageDigestMirrors { - if in == nil { - return nil - } - out := new(ImageDigestMirrors) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageLabel) DeepCopyInto(out *ImageLabel) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel. -func (in *ImageLabel) DeepCopy() *ImageLabel { - if in == nil { - return nil - } - out := new(ImageLabel) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageList) DeepCopyInto(out *ImageList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Image, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. -func (in *ImageList) DeepCopy() *ImageList { - if in == nil { - return nil - } - out := new(ImageList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { - *out = *in - if in.AllowedRegistriesForImport != nil { - in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport - *out = make([]RegistryLocation, len(*in)) - copy(*out, *in) - } - if in.ExternalRegistryHostnames != nil { - in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.AdditionalTrustedCA = in.AdditionalTrustedCA - in.RegistrySources.DeepCopyInto(&out.RegistrySources) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. -func (in *ImageSpec) DeepCopy() *ImageSpec { - if in == nil { - return nil - } - out := new(ImageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStatus) DeepCopyInto(out *ImageStatus) { - *out = *in - if in.ExternalRegistryHostnames != nil { - in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus. -func (in *ImageStatus) DeepCopy() *ImageStatus { - if in == nil { - return nil - } - out := new(ImageStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageTagMirrorSet) DeepCopyInto(out *ImageTagMirrorSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSet. -func (in *ImageTagMirrorSet) DeepCopy() *ImageTagMirrorSet { - if in == nil { - return nil - } - out := new(ImageTagMirrorSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageTagMirrorSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageTagMirrorSetList) DeepCopyInto(out *ImageTagMirrorSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ImageTagMirrorSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetList. -func (in *ImageTagMirrorSetList) DeepCopy() *ImageTagMirrorSetList { - if in == nil { - return nil - } - out := new(ImageTagMirrorSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageTagMirrorSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ImageTagMirrorSetSpec) DeepCopyInto(out *ImageTagMirrorSetSpec) { - *out = *in - if in.ImageTagMirrors != nil { - in, out := &in.ImageTagMirrors, &out.ImageTagMirrors - *out = make([]ImageTagMirrors, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetSpec. -func (in *ImageTagMirrorSetSpec) DeepCopy() *ImageTagMirrorSetSpec { - if in == nil { - return nil - } - out := new(ImageTagMirrorSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageTagMirrorSetStatus) DeepCopyInto(out *ImageTagMirrorSetStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetStatus. -func (in *ImageTagMirrorSetStatus) DeepCopy() *ImageTagMirrorSetStatus { - if in == nil { - return nil - } - out := new(ImageTagMirrorSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageTagMirrors) DeepCopyInto(out *ImageTagMirrors) { - *out = *in - if in.Mirrors != nil { - in, out := &in.Mirrors, &out.Mirrors - *out = make([]ImageMirror, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrors. -func (in *ImageTagMirrors) DeepCopy() *ImageTagMirrors { - if in == nil { - return nil - } - out := new(ImageTagMirrors) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Infrastructure) DeepCopyInto(out *Infrastructure) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure. -func (in *Infrastructure) DeepCopy() *Infrastructure { - if in == nil { - return nil - } - out := new(Infrastructure) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Infrastructure) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Infrastructure, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureList. -func (in *InfrastructureList) DeepCopy() *InfrastructureList { - if in == nil { - return nil - } - out := new(InfrastructureList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *InfrastructureList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) { - *out = *in - out.CloudConfig = in.CloudConfig - in.PlatformSpec.DeepCopyInto(&out.PlatformSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec. -func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec { - if in == nil { - return nil - } - out := new(InfrastructureSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) { - *out = *in - if in.PlatformStatus != nil { - in, out := &in.PlatformStatus, &out.PlatformStatus - *out = new(PlatformStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureStatus. -func (in *InfrastructureStatus) DeepCopy() *InfrastructureStatus { - if in == nil { - return nil - } - out := new(InfrastructureStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Ingress) DeepCopyInto(out *Ingress) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. -func (in *Ingress) DeepCopy() *Ingress { - if in == nil { - return nil - } - out := new(Ingress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Ingress) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressList) DeepCopyInto(out *IngressList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Ingress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. -func (in *IngressList) DeepCopy() *IngressList { - if in == nil { - return nil - } - out := new(IngressList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IngressList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressPlatformSpec) DeepCopyInto(out *IngressPlatformSpec) { - *out = *in - if in.AWS != nil { - in, out := &in.AWS, &out.AWS - *out = new(AWSIngressSpec) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPlatformSpec. 
-func (in *IngressPlatformSpec) DeepCopy() *IngressPlatformSpec { - if in == nil { - return nil - } - out := new(IngressPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { - *out = *in - if in.ComponentRoutes != nil { - in, out := &in.ComponentRoutes, &out.ComponentRoutes - *out = make([]ComponentRouteSpec, len(*in)) - copy(*out, *in) - } - if in.RequiredHSTSPolicies != nil { - in, out := &in.RequiredHSTSPolicies, &out.RequiredHSTSPolicies - *out = make([]RequiredHSTSPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. -func (in *IngressSpec) DeepCopy() *IngressSpec { - if in == nil { - return nil - } - out := new(IngressSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { - *out = *in - if in.ComponentRoutes != nil { - in, out := &in.ComponentRoutes, &out.ComponentRoutes - *out = make([]ComponentRouteStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. -func (in *IngressStatus) DeepCopy() *IngressStatus { - if in == nil { - return nil - } - out := new(IngressStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateTLSProfile. -func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile { - if in == nil { - return nil - } - out := new(IntermediateTLSProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) { - *out = *in - out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystoneIdentityProvider. -func (in *KeystoneIdentityProvider) DeepCopy() *KeystoneIdentityProvider { - if in == nil { - return nil - } - out := new(KeystoneIdentityProvider) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeClientConfig) DeepCopyInto(out *KubeClientConfig) { - *out = *in - out.ConnectionOverrides = in.ConnectionOverrides - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeClientConfig. -func (in *KubeClientConfig) DeepCopy() *KubeClientConfig { - if in == nil { - return nil - } - out := new(KubeClientConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubevirtPlatformSpec) DeepCopyInto(out *KubevirtPlatformSpec) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformSpec.
-func (in *KubevirtPlatformSpec) DeepCopy() *KubevirtPlatformSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(KubevirtPlatformSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *KubevirtPlatformStatus) DeepCopyInto(out *KubevirtPlatformStatus) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformStatus.
-func (in *KubevirtPlatformStatus) DeepCopy() *KubevirtPlatformStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(KubevirtPlatformStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) {
-	*out = *in
-	if in.ID != nil {
-		in, out := &in.ID, &out.ID
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.PreferredUsername != nil {
-		in, out := &in.PreferredUsername, &out.PreferredUsername
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Name != nil {
-		in, out := &in.Name, &out.Name
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Email != nil {
-		in, out := &in.Email, &out.Email
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping.
-func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping {
-	if in == nil {
-		return nil
-	}
-	out := new(LDAPAttributeMapping)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LDAPIdentityProvider) DeepCopyInto(out *LDAPIdentityProvider) {
-	*out = *in
-	out.BindPassword = in.BindPassword
-	out.CA = in.CA
-	in.Attributes.DeepCopyInto(&out.Attributes)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPIdentityProvider.
-func (in *LDAPIdentityProvider) DeepCopy() *LDAPIdentityProvider {
-	if in == nil {
-		return nil
-	}
-	out := new(LDAPIdentityProvider)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LeaderElection) DeepCopyInto(out *LeaderElection) {
-	*out = *in
-	out.LeaseDuration = in.LeaseDuration
-	out.RenewDeadline = in.RenewDeadline
-	out.RetryPeriod = in.RetryPeriod
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElection.
-func (in *LeaderElection) DeepCopy() *LeaderElection {
-	if in == nil {
-		return nil
-	}
-	out := new(LeaderElection)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) {
-	*out = *in
-	in.Platform.DeepCopyInto(&out.Platform)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer.
-func (in *LoadBalancer) DeepCopy() *LoadBalancer {
-	if in == nil {
-		return nil
-	}
-	out := new(LoadBalancer)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MTUMigration) DeepCopyInto(out *MTUMigration) {
-	*out = *in
-	if in.Network != nil {
-		in, out := &in.Network, &out.Network
-		*out = new(MTUMigrationValues)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Machine != nil {
-		in, out := &in.Machine, &out.Machine
-		*out = new(MTUMigrationValues)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigration.
-func (in *MTUMigration) DeepCopy() *MTUMigration {
-	if in == nil {
-		return nil
-	}
-	out := new(MTUMigration)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MTUMigrationValues) DeepCopyInto(out *MTUMigrationValues) {
-	*out = *in
-	if in.To != nil {
-		in, out := &in.To, &out.To
-		*out = new(uint32)
-		**out = **in
-	}
-	if in.From != nil {
-		in, out := &in.From, &out.From
-		*out = new(uint32)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigrationValues.
-func (in *MTUMigrationValues) DeepCopy() *MTUMigrationValues {
-	if in == nil {
-		return nil
-	}
-	out := new(MTUMigrationValues)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MaxAgePolicy) DeepCopyInto(out *MaxAgePolicy) {
-	*out = *in
-	if in.LargestMaxAge != nil {
-		in, out := &in.LargestMaxAge, &out.LargestMaxAge
-		*out = new(int32)
-		**out = **in
-	}
-	if in.SmallestMaxAge != nil {
-		in, out := &in.SmallestMaxAge, &out.SmallestMaxAge
-		*out = new(int32)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaxAgePolicy.
-func (in *MaxAgePolicy) DeepCopy() *MaxAgePolicy {
-	if in == nil {
-		return nil
-	}
-	out := new(MaxAgePolicy)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ModernTLSProfile) DeepCopyInto(out *ModernTLSProfile) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModernTLSProfile.
-func (in *ModernTLSProfile) DeepCopy() *ModernTLSProfile {
-	if in == nil {
-		return nil
-	}
-	out := new(ModernTLSProfile)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) {
-	*out = *in
-	if in.Names != nil {
-		in, out := &in.Names, &out.Names
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	out.CertInfo = in.CertInfo
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate.
-func (in *NamedCertificate) DeepCopy() *NamedCertificate {
-	if in == nil {
-		return nil
-	}
-	out := new(NamedCertificate)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Network) DeepCopyInto(out *Network) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network.
-func (in *Network) DeepCopy() *Network {
-	if in == nil {
-		return nil
-	}
-	out := new(Network)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Network) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkDiagnostics) DeepCopyInto(out *NetworkDiagnostics) {
-	*out = *in
-	in.SourcePlacement.DeepCopyInto(&out.SourcePlacement)
-	in.TargetPlacement.DeepCopyInto(&out.TargetPlacement)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnostics.
-func (in *NetworkDiagnostics) DeepCopy() *NetworkDiagnostics {
-	if in == nil {
-		return nil
-	}
-	out := new(NetworkDiagnostics)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkDiagnosticsSourcePlacement) DeepCopyInto(out *NetworkDiagnosticsSourcePlacement) {
-	*out = *in
-	if in.NodeSelector != nil {
-		in, out := &in.NodeSelector, &out.NodeSelector
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.Tolerations != nil {
-		in, out := &in.Tolerations, &out.Tolerations
-		*out = make([]corev1.Toleration, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnosticsSourcePlacement.
-func (in *NetworkDiagnosticsSourcePlacement) DeepCopy() *NetworkDiagnosticsSourcePlacement {
-	if in == nil {
-		return nil
-	}
-	out := new(NetworkDiagnosticsSourcePlacement)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkDiagnosticsTargetPlacement) DeepCopyInto(out *NetworkDiagnosticsTargetPlacement) {
-	*out = *in
-	if in.NodeSelector != nil {
-		in, out := &in.NodeSelector, &out.NodeSelector
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.Tolerations != nil {
-		in, out := &in.Tolerations, &out.Tolerations
-		*out = make([]corev1.Toleration, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnosticsTargetPlacement.
-func (in *NetworkDiagnosticsTargetPlacement) DeepCopy() *NetworkDiagnosticsTargetPlacement {
-	if in == nil {
-		return nil
-	}
-	out := new(NetworkDiagnosticsTargetPlacement)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkList) DeepCopyInto(out *NetworkList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Network, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList.
-func (in *NetworkList) DeepCopy() *NetworkList {
-	if in == nil {
-		return nil
-	}
-	out := new(NetworkList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NetworkList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) {
-	*out = *in
-	if in.MTU != nil {
-		in, out := &in.MTU, &out.MTU
-		*out = new(MTUMigration)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration.
-func (in *NetworkMigration) DeepCopy() *NetworkMigration {
-	if in == nil {
-		return nil
-	}
-	out := new(NetworkMigration)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
-	*out = *in
-	if in.ClusterNetwork != nil {
-		in, out := &in.ClusterNetwork, &out.ClusterNetwork
-		*out = make([]ClusterNetworkEntry, len(*in))
-		copy(*out, *in)
-	}
-	if in.ServiceNetwork != nil {
-		in, out := &in.ServiceNetwork, &out.ServiceNetwork
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.ExternalIP != nil {
-		in, out := &in.ExternalIP, &out.ExternalIP
-		*out = new(ExternalIPConfig)
-		(*in).DeepCopyInto(*out)
-	}
-	in.NetworkDiagnostics.DeepCopyInto(&out.NetworkDiagnostics)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
-func (in *NetworkSpec) DeepCopy() *NetworkSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(NetworkSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) {
-	*out = *in
-	if in.ClusterNetwork != nil {
-		in, out := &in.ClusterNetwork, &out.ClusterNetwork
-		*out = make([]ClusterNetworkEntry, len(*in))
-		copy(*out, *in)
-	}
-	if in.ServiceNetwork != nil {
-		in, out := &in.ServiceNetwork, &out.ServiceNetwork
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Migration != nil {
-		in, out := &in.Migration, &out.Migration
-		*out = new(NetworkMigration)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]metav1.Condition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus.
-func (in *NetworkStatus) DeepCopy() *NetworkStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(NetworkStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Node) DeepCopyInto(out *Node) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	out.Spec = in.Spec
-	out.Status = in.Status
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node.
-func (in *Node) DeepCopy() *Node {
-	if in == nil {
-		return nil
-	}
-	out := new(Node)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Node) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeList) DeepCopyInto(out *NodeList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Node, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList.
-func (in *NodeList) DeepCopy() *NodeList {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NodeList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
-func (in *NodeSpec) DeepCopy() *NodeSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
-func (in *NodeStatus) DeepCopy() *NodeStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NutanixFailureDomain) DeepCopyInto(out *NutanixFailureDomain) {
-	*out = *in
-	in.Cluster.DeepCopyInto(&out.Cluster)
-	if in.Subnets != nil {
-		in, out := &in.Subnets, &out.Subnets
-		*out = make([]NutanixResourceIdentifier, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixFailureDomain.
-func (in *NutanixFailureDomain) DeepCopy() *NutanixFailureDomain {
-	if in == nil {
-		return nil
-	}
-	out := new(NutanixFailureDomain)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NutanixPlatformLoadBalancer) DeepCopyInto(out *NutanixPlatformLoadBalancer) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformLoadBalancer.
-func (in *NutanixPlatformLoadBalancer) DeepCopy() *NutanixPlatformLoadBalancer {
-	if in == nil {
-		return nil
-	}
-	out := new(NutanixPlatformLoadBalancer)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NutanixPlatformSpec) DeepCopyInto(out *NutanixPlatformSpec) {
-	*out = *in
-	out.PrismCentral = in.PrismCentral
-	if in.PrismElements != nil {
-		in, out := &in.PrismElements, &out.PrismElements
-		*out = make([]NutanixPrismElementEndpoint, len(*in))
-		copy(*out, *in)
-	}
-	if in.FailureDomains != nil {
-		in, out := &in.FailureDomains, &out.FailureDomains
-		*out = make([]NutanixFailureDomain, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformSpec.
-func (in *NutanixPlatformSpec) DeepCopy() *NutanixPlatformSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(NutanixPlatformSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NutanixPlatformStatus) DeepCopyInto(out *NutanixPlatformStatus) {
-	*out = *in
-	if in.APIServerInternalIPs != nil {
-		in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.IngressIPs != nil {
-		in, out := &in.IngressIPs, &out.IngressIPs
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.LoadBalancer != nil {
-		in, out := &in.LoadBalancer, &out.LoadBalancer
-		*out = new(NutanixPlatformLoadBalancer)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformStatus.
-func (in *NutanixPlatformStatus) DeepCopy() *NutanixPlatformStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(NutanixPlatformStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NutanixPrismElementEndpoint) DeepCopyInto(out *NutanixPrismElementEndpoint) {
-	*out = *in
-	out.Endpoint = in.Endpoint
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPrismElementEndpoint.
-func (in *NutanixPrismElementEndpoint) DeepCopy() *NutanixPrismElementEndpoint {
-	if in == nil {
-		return nil
-	}
-	out := new(NutanixPrismElementEndpoint)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NutanixPrismEndpoint) DeepCopyInto(out *NutanixPrismEndpoint) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPrismEndpoint.
-func (in *NutanixPrismEndpoint) DeepCopy() *NutanixPrismEndpoint {
-	if in == nil {
-		return nil
-	}
-	out := new(NutanixPrismEndpoint)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NutanixResourceIdentifier) DeepCopyInto(out *NutanixResourceIdentifier) {
-	*out = *in
-	if in.UUID != nil {
-		in, out := &in.UUID, &out.UUID
-		*out = new(string)
-		**out = **in
-	}
-	if in.Name != nil {
-		in, out := &in.Name, &out.Name
-		*out = new(string)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixResourceIdentifier.
-func (in *NutanixResourceIdentifier) DeepCopy() *NutanixResourceIdentifier {
-	if in == nil {
-		return nil
-	}
-	out := new(NutanixResourceIdentifier)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuth) DeepCopyInto(out *OAuth) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	out.Status = in.Status
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth.
-func (in *OAuth) DeepCopy() *OAuth {
-	if in == nil {
-		return nil
-	}
-	out := new(OAuth)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OAuth) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthList) DeepCopyInto(out *OAuthList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]OAuth, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthList.
-func (in *OAuthList) DeepCopy() *OAuthList {
-	if in == nil {
-		return nil
-	}
-	out := new(OAuthList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OAuthList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthRemoteConnectionInfo) DeepCopyInto(out *OAuthRemoteConnectionInfo) {
-	*out = *in
-	out.CA = in.CA
-	out.TLSClientCert = in.TLSClientCert
-	out.TLSClientKey = in.TLSClientKey
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRemoteConnectionInfo.
-func (in *OAuthRemoteConnectionInfo) DeepCopy() *OAuthRemoteConnectionInfo {
-	if in == nil {
-		return nil
-	}
-	out := new(OAuthRemoteConnectionInfo)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthSpec) DeepCopyInto(out *OAuthSpec) {
-	*out = *in
-	if in.IdentityProviders != nil {
-		in, out := &in.IdentityProviders, &out.IdentityProviders
-		*out = make([]IdentityProvider, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	in.TokenConfig.DeepCopyInto(&out.TokenConfig)
-	out.Templates = in.Templates
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthSpec.
-func (in *OAuthSpec) DeepCopy() *OAuthSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(OAuthSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthStatus) DeepCopyInto(out *OAuthStatus) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthStatus.
-func (in *OAuthStatus) DeepCopy() *OAuthStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(OAuthStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) {
-	*out = *in
-	out.Login = in.Login
-	out.ProviderSelection = in.ProviderSelection
-	out.Error = in.Error
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates.
-func (in *OAuthTemplates) DeepCopy() *OAuthTemplates {
-	if in == nil {
-		return nil
-	}
-	out := new(OAuthTemplates)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OIDCClientConfig) DeepCopyInto(out *OIDCClientConfig) {
-	*out = *in
-	out.ClientSecret = in.ClientSecret
-	if in.ExtraScopes != nil {
-		in, out := &in.ExtraScopes, &out.ExtraScopes
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientConfig.
-func (in *OIDCClientConfig) DeepCopy() *OIDCClientConfig {
-	if in == nil {
-		return nil
-	}
-	out := new(OIDCClientConfig)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OIDCClientReference) DeepCopyInto(out *OIDCClientReference) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientReference.
-func (in *OIDCClientReference) DeepCopy() *OIDCClientReference {
-	if in == nil {
-		return nil
-	}
-	out := new(OIDCClientReference)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OIDCClientStatus) DeepCopyInto(out *OIDCClientStatus) {
-	*out = *in
-	if in.CurrentOIDCClients != nil {
-		in, out := &in.CurrentOIDCClients, &out.CurrentOIDCClients
-		*out = make([]OIDCClientReference, len(*in))
-		copy(*out, *in)
-	}
-	if in.ConsumingUsers != nil {
-		in, out := &in.ConsumingUsers, &out.ConsumingUsers
-		*out = make([]ConsumingUser, len(*in))
-		copy(*out, *in)
-	}
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]metav1.Condition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientStatus.
-func (in *OIDCClientStatus) DeepCopy() *OIDCClientStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(OIDCClientStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OIDCProvider) DeepCopyInto(out *OIDCProvider) {
-	*out = *in
-	in.Issuer.DeepCopyInto(&out.Issuer)
-	if in.OIDCClients != nil {
-		in, out := &in.OIDCClients, &out.OIDCClients
-		*out = make([]OIDCClientConfig, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	in.ClaimMappings.DeepCopyInto(&out.ClaimMappings)
-	if in.ClaimValidationRules != nil {
-		in, out := &in.ClaimValidationRules, &out.ClaimValidationRules
-		*out = make([]TokenClaimValidationRule, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCProvider.
-func (in *OIDCProvider) DeepCopy() *OIDCProvider {
-	if in == nil {
-		return nil
-	}
-	out := new(OIDCProvider)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
-func (in *ObjectReference) DeepCopy() *ObjectReference {
-	if in == nil {
-		return nil
-	}
-	out := new(ObjectReference)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OldTLSProfile) DeepCopyInto(out *OldTLSProfile) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OldTLSProfile.
-func (in *OldTLSProfile) DeepCopy() *OldTLSProfile {
-	if in == nil {
-		return nil
-	}
-	out := new(OldTLSProfile)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) {
-	*out = *in
-	if in.PreferredUsername != nil {
-		in, out := &in.PreferredUsername, &out.PreferredUsername
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Name != nil {
-		in, out := &in.Name, &out.Name
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Email != nil {
-		in, out := &in.Email, &out.Email
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Groups != nil {
-		in, out := &in.Groups, &out.Groups
-		*out = make([]OpenIDClaim, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims.
-func (in *OpenIDClaims) DeepCopy() *OpenIDClaims {
-	if in == nil {
-		return nil
-	}
-	out := new(OpenIDClaims)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) {
-	*out = *in
-	out.ClientSecret = in.ClientSecret
-	out.CA = in.CA
-	if in.ExtraScopes != nil {
-		in, out := &in.ExtraScopes, &out.ExtraScopes
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.ExtraAuthorizeParameters != nil {
-		in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	in.Claims.DeepCopyInto(&out.Claims)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider.
-func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider {
-	if in == nil {
-		return nil
-	}
-	out := new(OpenIDIdentityProvider)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OpenStackPlatformLoadBalancer) DeepCopyInto(out *OpenStackPlatformLoadBalancer) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformLoadBalancer.
-func (in *OpenStackPlatformLoadBalancer) DeepCopy() *OpenStackPlatformLoadBalancer {
-	if in == nil {
-		return nil
-	}
-	out := new(OpenStackPlatformLoadBalancer)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OpenStackPlatformSpec) DeepCopyInto(out *OpenStackPlatformSpec) {
-	*out = *in
-	if in.APIServerInternalIPs != nil {
-		in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs
-		*out = make([]IP, len(*in))
-		copy(*out, *in)
-	}
-	if in.IngressIPs != nil {
-		in, out := &in.IngressIPs, &out.IngressIPs
-		*out = make([]IP, len(*in))
-		copy(*out, *in)
-	}
-	if in.MachineNetworks != nil {
-		in, out := &in.MachineNetworks, &out.MachineNetworks
-		*out = make([]CIDR, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformSpec.
-func (in *OpenStackPlatformSpec) DeepCopy() *OpenStackPlatformSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(OpenStackPlatformSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OpenStackPlatformStatus) DeepCopyInto(out *OpenStackPlatformStatus) {
-	*out = *in
-	if in.APIServerInternalIPs != nil {
-		in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.IngressIPs != nil {
-		in, out := &in.IngressIPs, &out.IngressIPs
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.LoadBalancer != nil {
-		in, out := &in.LoadBalancer, &out.LoadBalancer
-		*out = new(OpenStackPlatformLoadBalancer)
-		**out = **in
-	}
-	if in.MachineNetworks != nil {
-		in, out := &in.MachineNetworks, &out.MachineNetworks
-		*out = make([]CIDR, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformStatus.
-func (in *OpenStackPlatformStatus) DeepCopy() *OpenStackPlatformStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(OpenStackPlatformStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperandVersion) DeepCopyInto(out *OperandVersion) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperandVersion.
-func (in *OperandVersion) DeepCopy() *OperandVersion {
-	if in == nil {
-		return nil
-	}
-	out := new(OperandVersion)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorHub) DeepCopyInto(out *OperatorHub) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHub.
-func (in *OperatorHub) DeepCopy() *OperatorHub {
-	if in == nil {
-		return nil
-	}
-	out := new(OperatorHub)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OperatorHub) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorHubList) DeepCopyInto(out *OperatorHubList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]OperatorHub, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubList.
-func (in *OperatorHubList) DeepCopy() *OperatorHubList {
-	if in == nil {
-		return nil
-	}
-	out := new(OperatorHubList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OperatorHubList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorHubSpec) DeepCopyInto(out *OperatorHubSpec) {
-	*out = *in
-	if in.Sources != nil {
-		in, out := &in.Sources, &out.Sources
-		*out = make([]HubSource, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubSpec.
-func (in *OperatorHubSpec) DeepCopy() *OperatorHubSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(OperatorHubSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorHubStatus) DeepCopyInto(out *OperatorHubStatus) {
-	*out = *in
-	if in.Sources != nil {
-		in, out := &in.Sources, &out.Sources
-		*out = make([]HubSourceStatus, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubStatus.
-func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(OperatorHubStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OvirtPlatformLoadBalancer) DeepCopyInto(out *OvirtPlatformLoadBalancer) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformLoadBalancer.
-func (in *OvirtPlatformLoadBalancer) DeepCopy() *OvirtPlatformLoadBalancer {
-	if in == nil {
-		return nil
-	}
-	out := new(OvirtPlatformLoadBalancer)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OvirtPlatformSpec) DeepCopyInto(out *OvirtPlatformSpec) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformSpec.
-func (in *OvirtPlatformSpec) DeepCopy() *OvirtPlatformSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(OvirtPlatformSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OvirtPlatformStatus) DeepCopyInto(out *OvirtPlatformStatus) {
-	*out = *in
-	if in.APIServerInternalIPs != nil {
-		in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.IngressIPs != nil {
-		in, out := &in.IngressIPs, &out.IngressIPs
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.LoadBalancer != nil {
-		in, out := &in.LoadBalancer, &out.LoadBalancer
-		*out = new(OvirtPlatformLoadBalancer)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformStatus.
-func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(OvirtPlatformStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) {
-	*out = *in
-	if in.AWS != nil {
-		in, out := &in.AWS, &out.AWS
-		*out = new(AWSPlatformSpec)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Azure != nil {
-		in, out := &in.Azure, &out.Azure
-		*out = new(AzurePlatformSpec)
-		**out = **in
-	}
-	if in.GCP != nil {
-		in, out := &in.GCP, &out.GCP
-		*out = new(GCPPlatformSpec)
-		**out = **in
-	}
-	if in.BareMetal != nil {
-		in, out := &in.BareMetal, &out.BareMetal
-		*out = new(BareMetalPlatformSpec)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.OpenStack != nil {
-		in, out := &in.OpenStack, &out.OpenStack
-		*out = new(OpenStackPlatformSpec)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Ovirt != nil {
-		in, out := &in.Ovirt, &out.Ovirt
-		*out = new(OvirtPlatformSpec)
-		**out = **in
-	}
-	if in.VSphere != nil {
-		in, out := &in.VSphere, &out.VSphere
-		*out = new(VSpherePlatformSpec)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.IBMCloud != nil {
-		in, out := &in.IBMCloud, &out.IBMCloud
-		*out = new(IBMCloudPlatformSpec)
-		**out = **in
-	}
-	if in.Kubevirt != nil {
-		in, out := &in.Kubevirt, &out.Kubevirt
-		*out = new(KubevirtPlatformSpec)
-		**out = **in
-	}
-	if in.EquinixMetal != nil {
-		in, out := &in.EquinixMetal, &out.EquinixMetal
-		*out = new(EquinixMetalPlatformSpec)
-		**out = **in
-	}
-	if in.PowerVS != nil {
-		in, out := &in.PowerVS, &out.PowerVS
-		*out = new(PowerVSPlatformSpec)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.AlibabaCloud != nil {
-		in, out := &in.AlibabaCloud, &out.AlibabaCloud
-		*out = new(AlibabaCloudPlatformSpec)
-		**out = **in
-	}
-	if in.Nutanix != nil {
-		in, out := &in.Nutanix, &out.Nutanix
-		*out = new(NutanixPlatformSpec)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.External != nil {
-		in, out := &in.External, &out.External
-		*out = new(ExternalPlatformSpec)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformSpec.
-func (in *PlatformSpec) DeepCopy() *PlatformSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(PlatformSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) {
-	*out = *in
-	if in.AWS != nil {
-		in, out := &in.AWS, &out.AWS
-		*out = new(AWSPlatformStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Azure != nil {
-		in, out := &in.Azure, &out.Azure
-		*out = new(AzurePlatformStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.GCP != nil {
-		in, out := &in.GCP, &out.GCP
-		*out = new(GCPPlatformStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.BareMetal != nil {
-		in, out := &in.BareMetal, &out.BareMetal
-		*out = new(BareMetalPlatformStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.OpenStack != nil {
-		in, out := &in.OpenStack, &out.OpenStack
-		*out = new(OpenStackPlatformStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Ovirt != nil {
-		in, out := &in.Ovirt, &out.Ovirt
-		*out = new(OvirtPlatformStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.VSphere != nil {
-		in, out := &in.VSphere, &out.VSphere
-		*out = new(VSpherePlatformStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.IBMCloud != nil {
-		in, out := &in.IBMCloud, &out.IBMCloud
-		*out = new(IBMCloudPlatformStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Kubevirt != nil {
-		in, out := &in.Kubevirt, &out.Kubevirt
-		*out = new(KubevirtPlatformStatus)
-		**out = **in
-	}
-	if in.EquinixMetal != nil {
-		in, out := &in.EquinixMetal, &out.EquinixMetal
-		*out = new(EquinixMetalPlatformStatus)
-		**out = **in
-	}
-	if in.PowerVS != nil {
-		in, out := &in.PowerVS, &out.PowerVS
-		*out = new(PowerVSPlatformStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.AlibabaCloud != nil {
-		in, out := &in.AlibabaCloud, &out.AlibabaCloud
-		*out = new(AlibabaCloudPlatformStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Nutanix != nil {
-		in, out := &in.Nutanix, &out.Nutanix
-		*out = new(NutanixPlatformStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.External != nil {
-		in, out := &in.External, &out.External
-		*out = new(ExternalPlatformStatus)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus.
-func (in *PlatformStatus) DeepCopy() *PlatformStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(PlatformStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) {
-	*out = *in
-	if in.ServiceEndpoints != nil {
-		in, out := &in.ServiceEndpoints, &out.ServiceEndpoints
-		*out = make([]PowerVSServiceEndpoint, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformSpec.
-func (in *PowerVSPlatformSpec) DeepCopy() *PowerVSPlatformSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(PowerVSPlatformSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PowerVSPlatformStatus) DeepCopyInto(out *PowerVSPlatformStatus) {
-	*out = *in
-	if in.ServiceEndpoints != nil {
-		in, out := &in.ServiceEndpoints, &out.ServiceEndpoints
-		*out = make([]PowerVSServiceEndpoint, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformStatus.
-func (in *PowerVSPlatformStatus) DeepCopy() *PowerVSPlatformStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(PowerVSPlatformStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PowerVSServiceEndpoint) DeepCopyInto(out *PowerVSServiceEndpoint) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSServiceEndpoint.
-func (in *PowerVSServiceEndpoint) DeepCopy() *PowerVSServiceEndpoint {
-	if in == nil {
-		return nil
-	}
-	out := new(PowerVSServiceEndpoint)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PrefixedClaimMapping) DeepCopyInto(out *PrefixedClaimMapping) {
-	*out = *in
-	out.TokenClaimMapping = in.TokenClaimMapping
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimMapping.
-func (in *PrefixedClaimMapping) DeepCopy() *PrefixedClaimMapping {
-	if in == nil {
-		return nil
-	}
-	out := new(PrefixedClaimMapping)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProfileCustomizations) DeepCopyInto(out *ProfileCustomizations) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileCustomizations.
-func (in *ProfileCustomizations) DeepCopy() *ProfileCustomizations {
-	if in == nil {
-		return nil
-	}
-	out := new(ProfileCustomizations)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Project) DeepCopyInto(out *Project) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	out.Spec = in.Spec
-	out.Status = in.Status
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project.
-func (in *Project) DeepCopy() *Project {
-	if in == nil {
-		return nil
-	}
-	out := new(Project)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Project) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProjectList) DeepCopyInto(out *ProjectList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Project, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList.
-func (in *ProjectList) DeepCopy() *ProjectList {
-	if in == nil {
-		return nil
-	}
-	out := new(ProjectList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ProjectList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) {
-	*out = *in
-	out.ProjectRequestTemplate = in.ProjectRequestTemplate
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec.
-func (in *ProjectSpec) DeepCopy() *ProjectSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ProjectSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus.
-func (in *ProjectStatus) DeepCopy() *ProjectStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(ProjectStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PromQLClusterCondition) DeepCopyInto(out *PromQLClusterCondition) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromQLClusterCondition.
-func (in *PromQLClusterCondition) DeepCopy() *PromQLClusterCondition {
-	if in == nil {
-		return nil
-	}
-	out := new(PromQLClusterCondition)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Proxy) DeepCopyInto(out *Proxy) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	out.Status = in.Status
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Proxy.
-func (in *Proxy) DeepCopy() *Proxy {
-	if in == nil {
-		return nil
-	}
-	out := new(Proxy)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Proxy) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProxyList) DeepCopyInto(out *ProxyList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Proxy, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyList.
-func (in *ProxyList) DeepCopy() *ProxyList {
-	if in == nil {
-		return nil
-	}
-	out := new(ProxyList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ProxyList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProxySpec) DeepCopyInto(out *ProxySpec) {
-	*out = *in
-	if in.ReadinessEndpoints != nil {
-		in, out := &in.ReadinessEndpoints, &out.ReadinessEndpoints
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	out.TrustedCA = in.TrustedCA
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxySpec.
-func (in *ProxySpec) DeepCopy() *ProxySpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ProxySpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatus.
-func (in *ProxyStatus) DeepCopy() *ProxyStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(ProxyStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation.
-func (in *RegistryLocation) DeepCopy() *RegistryLocation {
-	if in == nil {
-		return nil
-	}
-	out := new(RegistryLocation)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RegistrySources) DeepCopyInto(out *RegistrySources) {
-	*out = *in
-	if in.InsecureRegistries != nil {
-		in, out := &in.InsecureRegistries, &out.InsecureRegistries
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.BlockedRegistries != nil {
-		in, out := &in.BlockedRegistries, &out.BlockedRegistries
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.AllowedRegistries != nil {
-		in, out := &in.AllowedRegistries, &out.AllowedRegistries
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.ContainerRuntimeSearchRegistries != nil {
-		in, out := &in.ContainerRuntimeSearchRegistries, &out.ContainerRuntimeSearchRegistries
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySources.
-func (in *RegistrySources) DeepCopy() *RegistrySources {
-	if in == nil {
-		return nil
-	}
-	out := new(RegistrySources)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Release) DeepCopyInto(out *Release) {
-	*out = *in
-	if in.Channels != nil {
-		in, out := &in.Channels, &out.Channels
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Release.
-func (in *Release) DeepCopy() *Release {
-	if in == nil {
-		return nil
-	}
-	out := new(Release)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) {
-	*out = *in
-	out.CertInfo = in.CertInfo
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo.
-func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo {
-	if in == nil {
-		return nil
-	}
-	out := new(RemoteConnectionInfo)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RepositoryDigestMirrors) DeepCopyInto(out *RepositoryDigestMirrors) {
-	*out = *in
-	if in.Mirrors != nil {
-		in, out := &in.Mirrors, &out.Mirrors
-		*out = make([]Mirror, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryDigestMirrors.
-func (in *RepositoryDigestMirrors) DeepCopy() *RepositoryDigestMirrors {
-	if in == nil {
-		return nil
-	}
-	out := new(RepositoryDigestMirrors)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) {
-	*out = *in
-	out.ClientCA = in.ClientCA
-	if in.ClientCommonNames != nil {
-		in, out := &in.ClientCommonNames, &out.ClientCommonNames
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Headers != nil {
-		in, out := &in.Headers, &out.Headers
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.PreferredUsernameHeaders != nil {
-		in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.NameHeaders != nil {
-		in, out := &in.NameHeaders, &out.NameHeaders
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.EmailHeaders != nil {
-		in, out := &in.EmailHeaders, &out.EmailHeaders
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider.
-func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider {
-	if in == nil {
-		return nil
-	}
-	out := new(RequestHeaderIdentityProvider)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RequiredHSTSPolicy) DeepCopyInto(out *RequiredHSTSPolicy) {
-	*out = *in
-	if in.NamespaceSelector != nil {
-		in, out := &in.NamespaceSelector, &out.NamespaceSelector
-		*out = new(metav1.LabelSelector)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.DomainPatterns != nil {
-		in, out := &in.DomainPatterns, &out.DomainPatterns
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	in.MaxAge.DeepCopyInto(&out.MaxAge)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequiredHSTSPolicy.
-func (in *RequiredHSTSPolicy) DeepCopy() *RequiredHSTSPolicy {
-	if in == nil {
-		return nil
-	}
-	out := new(RequiredHSTSPolicy)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Scheduler) DeepCopyInto(out *Scheduler) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduler. -func (in *Scheduler) DeepCopy() *Scheduler { - if in == nil { - return nil - } - out := new(Scheduler) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Scheduler) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SchedulerList) DeepCopyInto(out *SchedulerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Scheduler, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerList. -func (in *SchedulerList) DeepCopy() *SchedulerList { - if in == nil { - return nil - } - out := new(SchedulerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SchedulerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SchedulerSpec) DeepCopyInto(out *SchedulerSpec) { - *out = *in - out.Policy = in.Policy - out.ProfileCustomizations = in.ProfileCustomizations - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerSpec. -func (in *SchedulerSpec) DeepCopy() *SchedulerSpec { - if in == nil { - return nil - } - out := new(SchedulerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SchedulerStatus) DeepCopyInto(out *SchedulerStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerStatus. -func (in *SchedulerStatus) DeepCopy() *SchedulerStatus { - if in == nil { - return nil - } - out := new(SchedulerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretNameReference) DeepCopyInto(out *SecretNameReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretNameReference. -func (in *SecretNameReference) DeepCopy() *SecretNameReference { - if in == nil { - return nil - } - out := new(SecretNameReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
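The `DeepCopyObject` wrappers on top-level kinds such as `Scheduler` and `SchedulerList` are what satisfy apimachinery's `runtime.Object` interface, so generic machinery can clone an object without knowing its concrete type. A small usage sketch (the `copyBeforeMutate` helper is hypothetical; `DeepCopyObject` is the real interface method):

package sketch

import "k8s.io/apimachinery/pkg/runtime"

// copyBeforeMutate takes a safe copy of any API object before it is changed.
// This matters for objects read from shared caches (e.g. informer stores),
// which must never be mutated in place.
func copyBeforeMutate(obj runtime.Object) runtime.Object {
	return obj.DeepCopyObject()
}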
-func (in *ServingInfo) DeepCopyInto(out *ServingInfo) { - *out = *in - out.CertInfo = in.CertInfo - if in.NamedCertificates != nil { - in, out := &in.NamedCertificates, &out.NamedCertificates - *out = make([]NamedCertificate, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.CipherSuites != nil { - in, out := &in.CipherSuites, &out.CipherSuites - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingInfo. -func (in *ServingInfo) DeepCopy() *ServingInfo { - if in == nil { - return nil - } - out := new(ServingInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SignatureStore) DeepCopyInto(out *SignatureStore) { - *out = *in - out.CA = in.CA - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureStore. -func (in *SignatureStore) DeepCopy() *SignatureStore { - if in == nil { - return nil - } - out := new(SignatureStore) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StringSource) DeepCopyInto(out *StringSource) { - *out = *in - out.StringSourceSpec = in.StringSourceSpec - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSource. -func (in *StringSource) DeepCopy() *StringSource { - if in == nil { - return nil - } - out := new(StringSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StringSourceSpec) DeepCopyInto(out *StringSourceSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSourceSpec. -func (in *StringSourceSpec) DeepCopy() *StringSourceSpec { - if in == nil { - return nil - } - out := new(StringSourceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) { - *out = *in - if in.Ciphers != nil { - in, out := &in.Ciphers, &out.Ciphers - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec. -func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec { - if in == nil { - return nil - } - out := new(TLSProfileSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TLSSecurityProfile) DeepCopyInto(out *TLSSecurityProfile) { - *out = *in - if in.Old != nil { - in, out := &in.Old, &out.Old - *out = new(OldTLSProfile) - **out = **in - } - if in.Intermediate != nil { - in, out := &in.Intermediate, &out.Intermediate - *out = new(IntermediateTLSProfile) - **out = **in - } - if in.Modern != nil { - in, out := &in.Modern, &out.Modern - *out = new(ModernTLSProfile) - **out = **in - } - if in.Custom != nil { - in, out := &in.Custom, &out.Custom - *out = new(CustomTLSProfile) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSecurityProfile. -func (in *TLSSecurityProfile) DeepCopy() *TLSSecurityProfile { - if in == nil { - return nil - } - out := new(TLSSecurityProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TemplateReference) DeepCopyInto(out *TemplateReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateReference. -func (in *TemplateReference) DeepCopy() *TemplateReference { - if in == nil { - return nil - } - out := new(TemplateReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TestDetails) DeepCopyInto(out *TestDetails) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestDetails. -func (in *TestDetails) DeepCopy() *TestDetails { - if in == nil { - return nil - } - out := new(TestDetails) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TestReporting) DeepCopyInto(out *TestReporting) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReporting. -func (in *TestReporting) DeepCopy() *TestReporting { - if in == nil { - return nil - } - out := new(TestReporting) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TestReportingSpec) DeepCopyInto(out *TestReportingSpec) { - *out = *in - if in.TestsForFeatureGates != nil { - in, out := &in.TestsForFeatureGates, &out.TestsForFeatureGates - *out = make([]FeatureGateTests, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReportingSpec. -func (in *TestReportingSpec) DeepCopy() *TestReportingSpec { - if in == nil { - return nil - } - out := new(TestReportingSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TestReportingStatus) DeepCopyInto(out *TestReportingStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReportingStatus. 
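Note the two pointer-copy forms in `TLSSecurityProfile` above: `**out = **in` is emitted only when the pointed-to type contains no reference fields (`OldTLSProfile`, `IntermediateTLSProfile`, `ModernTLSProfile`), while `CustomTLSProfile`, which carries a cipher slice, gets a recursive `(*in).DeepCopyInto(*out)`. A condensed sketch of the distinction, with hypothetical `Flat`/`Nested`/`Profile` types:

package sketch

// Flat has only value fields, so a dereferencing assignment fully copies it.
type Flat struct{ Enabled bool }

// Nested holds a slice, so it needs its own DeepCopyInto.
type Nested struct{ Ciphers []string }

func (in *Nested) DeepCopyInto(out *Nested) {
	*out = *in
	if in.Ciphers != nil {
		in, out := &in.Ciphers, &out.Ciphers
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

type Profile struct {
	Flat   *Flat
	Nested *Nested
}

func (in *Profile) DeepCopyInto(out *Profile) {
	*out = *in
	if in.Flat != nil {
		in, out := &in.Flat, &out.Flat
		*out = new(Flat)
		**out = **in // safe: Flat has no reference fields
	}
	if in.Nested != nil {
		in, out := &in.Nested, &out.Nested
		*out = new(Nested)
		(*in).DeepCopyInto(*out) // recurse: Nested holds a slice
	}
}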
-func (in *TestReportingStatus) DeepCopy() *TestReportingStatus { - if in == nil { - return nil - } - out := new(TestReportingStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenClaimMapping) DeepCopyInto(out *TokenClaimMapping) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMapping. -func (in *TokenClaimMapping) DeepCopy() *TokenClaimMapping { - if in == nil { - return nil - } - out := new(TokenClaimMapping) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenClaimMappings) DeepCopyInto(out *TokenClaimMappings) { - *out = *in - in.Username.DeepCopyInto(&out.Username) - out.Groups = in.Groups - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMappings. -func (in *TokenClaimMappings) DeepCopy() *TokenClaimMappings { - if in == nil { - return nil - } - out := new(TokenClaimMappings) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) { - *out = *in - if in.RequiredClaim != nil { - in, out := &in.RequiredClaim, &out.RequiredClaim - *out = new(TokenRequiredClaim) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimValidationRule. -func (in *TokenClaimValidationRule) DeepCopy() *TokenClaimValidationRule { - if in == nil { - return nil - } - out := new(TokenClaimValidationRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenConfig) DeepCopyInto(out *TokenConfig) { - *out = *in - if in.AccessTokenInactivityTimeout != nil { - in, out := &in.AccessTokenInactivityTimeout, &out.AccessTokenInactivityTimeout - *out = new(metav1.Duration) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig. -func (in *TokenConfig) DeepCopy() *TokenConfig { - if in == nil { - return nil - } - out := new(TokenConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenIssuer) DeepCopyInto(out *TokenIssuer) { - *out = *in - if in.Audiences != nil { - in, out := &in.Audiences, &out.Audiences - *out = make([]TokenAudience, len(*in)) - copy(*out, *in) - } - out.CertificateAuthority = in.CertificateAuthority - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenIssuer. -func (in *TokenIssuer) DeepCopy() *TokenIssuer { - if in == nil { - return nil - } - out := new(TokenIssuer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenRequiredClaim) DeepCopyInto(out *TokenRequiredClaim) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequiredClaim. 
-func (in *TokenRequiredClaim) DeepCopy() *TokenRequiredClaim { - if in == nil { - return nil - } - out := new(TokenRequiredClaim) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Update) DeepCopyInto(out *Update) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Update. -func (in *Update) DeepCopy() *Update { - if in == nil { - return nil - } - out := new(Update) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UpdateHistory) DeepCopyInto(out *UpdateHistory) { - *out = *in - in.StartedTime.DeepCopyInto(&out.StartedTime) - if in.CompletionTime != nil { - in, out := &in.CompletionTime, &out.CompletionTime - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateHistory. -func (in *UpdateHistory) DeepCopy() *UpdateHistory { - if in == nil { - return nil - } - out := new(UpdateHistory) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UsernameClaimMapping) DeepCopyInto(out *UsernameClaimMapping) { - *out = *in - out.TokenClaimMapping = in.TokenClaimMapping - if in.Prefix != nil { - in, out := &in.Prefix, &out.Prefix - *out = new(UsernamePrefix) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernameClaimMapping. -func (in *UsernameClaimMapping) DeepCopy() *UsernameClaimMapping { - if in == nil { - return nil - } - out := new(UsernameClaimMapping) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UsernamePrefix) DeepCopyInto(out *UsernamePrefix) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernamePrefix. -func (in *UsernamePrefix) DeepCopy() *UsernamePrefix { - if in == nil { - return nil - } - out := new(UsernamePrefix) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VSpherePlatformFailureDomainSpec) DeepCopyInto(out *VSpherePlatformFailureDomainSpec) { - *out = *in - in.Topology.DeepCopyInto(&out.Topology) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformFailureDomainSpec. -func (in *VSpherePlatformFailureDomainSpec) DeepCopy() *VSpherePlatformFailureDomainSpec { - if in == nil { - return nil - } - out := new(VSpherePlatformFailureDomainSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VSpherePlatformLoadBalancer) DeepCopyInto(out *VSpherePlatformLoadBalancer) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformLoadBalancer. 
-func (in *VSpherePlatformLoadBalancer) DeepCopy() *VSpherePlatformLoadBalancer { - if in == nil { - return nil - } - out := new(VSpherePlatformLoadBalancer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VSpherePlatformNodeNetworking) DeepCopyInto(out *VSpherePlatformNodeNetworking) { - *out = *in - in.External.DeepCopyInto(&out.External) - in.Internal.DeepCopyInto(&out.Internal) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformNodeNetworking. -func (in *VSpherePlatformNodeNetworking) DeepCopy() *VSpherePlatformNodeNetworking { - if in == nil { - return nil - } - out := new(VSpherePlatformNodeNetworking) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VSpherePlatformNodeNetworkingSpec) DeepCopyInto(out *VSpherePlatformNodeNetworkingSpec) { - *out = *in - if in.NetworkSubnetCIDR != nil { - in, out := &in.NetworkSubnetCIDR, &out.NetworkSubnetCIDR - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ExcludeNetworkSubnetCIDR != nil { - in, out := &in.ExcludeNetworkSubnetCIDR, &out.ExcludeNetworkSubnetCIDR - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformNodeNetworkingSpec. -func (in *VSpherePlatformNodeNetworkingSpec) DeepCopy() *VSpherePlatformNodeNetworkingSpec { - if in == nil { - return nil - } - out := new(VSpherePlatformNodeNetworkingSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VSpherePlatformSpec) DeepCopyInto(out *VSpherePlatformSpec) { - *out = *in - if in.VCenters != nil { - in, out := &in.VCenters, &out.VCenters - *out = make([]VSpherePlatformVCenterSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailureDomains != nil { - in, out := &in.FailureDomains, &out.FailureDomains - *out = make([]VSpherePlatformFailureDomainSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.NodeNetworking.DeepCopyInto(&out.NodeNetworking) - if in.APIServerInternalIPs != nil { - in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs - *out = make([]IP, len(*in)) - copy(*out, *in) - } - if in.IngressIPs != nil { - in, out := &in.IngressIPs, &out.IngressIPs - *out = make([]IP, len(*in)) - copy(*out, *in) - } - if in.MachineNetworks != nil { - in, out := &in.MachineNetworks, &out.MachineNetworks - *out = make([]CIDR, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformSpec. -func (in *VSpherePlatformSpec) DeepCopy() *VSpherePlatformSpec { - if in == nil { - return nil - } - out := new(VSpherePlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
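`VSpherePlatformSpec` above also shows the third slice idiom: plain `copy()` suffices for element types without reference fields (`[]IP` and `[]CIDR` are defined string types), but a slice of structs that themselves need deep copies is copied element-wise. Sketched with a hypothetical `Item`/`Spec` pair:

package sketch

// Item carries a slice, so plain copy() over []Item would still alias
// each element's Tags backing array.
type Item struct{ Tags []string }

func (in *Item) DeepCopyInto(out *Item) {
	*out = *in
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

type Spec struct{ Items []Item }

func (in *Spec) DeepCopyInto(out *Spec) {
	*out = *in
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Item, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i]) // element-wise deep copy
		}
	}
}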
-func (in *VSpherePlatformStatus) DeepCopyInto(out *VSpherePlatformStatus) { - *out = *in - if in.APIServerInternalIPs != nil { - in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IngressIPs != nil { - in, out := &in.IngressIPs, &out.IngressIPs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.LoadBalancer != nil { - in, out := &in.LoadBalancer, &out.LoadBalancer - *out = new(VSpherePlatformLoadBalancer) - **out = **in - } - if in.MachineNetworks != nil { - in, out := &in.MachineNetworks, &out.MachineNetworks - *out = make([]CIDR, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformStatus. -func (in *VSpherePlatformStatus) DeepCopy() *VSpherePlatformStatus { - if in == nil { - return nil - } - out := new(VSpherePlatformStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VSpherePlatformTopology) DeepCopyInto(out *VSpherePlatformTopology) { - *out = *in - if in.Networks != nil { - in, out := &in.Networks, &out.Networks - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformTopology. -func (in *VSpherePlatformTopology) DeepCopy() *VSpherePlatformTopology { - if in == nil { - return nil - } - out := new(VSpherePlatformTopology) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VSpherePlatformVCenterSpec) DeepCopyInto(out *VSpherePlatformVCenterSpec) { - *out = *in - if in.Datacenters != nil { - in, out := &in.Datacenters, &out.Datacenters - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformVCenterSpec. -func (in *VSpherePlatformVCenterSpec) DeepCopy() *VSpherePlatformVCenterSpec { - if in == nil { - return nil - } - out := new(VSpherePlatformVCenterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) { - *out = *in - out.KubeConfig = in.KubeConfig - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator. 
-func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator { - if in == nil { - return nil - } - out := new(WebhookTokenAuthenticator) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml deleted file mode 100644 index ddc7594f..00000000 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ /dev/null @@ -1,508 +0,0 @@ -apiservers.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: apiservers.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: APIServer - Labels: {} - PluralName: apiservers - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -authentications.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: authentications.config.openshift.io - Capability: "" - Category: "" - FeatureGates: - - ExternalOIDC - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: Authentication - Labels: {} - PluralName: authentications - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -builds.config.openshift.io: - Annotations: {} - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: builds.config.openshift.io - Capability: Build - Category: "" - FeatureGates: [] - FilenameOperatorName: openshift-controller-manager - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: Build - Labels: {} - PluralName: builds - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -clusteroperators.config.openshift.io: - Annotations: - include.release.openshift.io/self-managed-high-availability: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/497 - CRDName: clusteroperators.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: cluster-version-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_00" - GroupName: config.openshift.io - HasStatus: true - KindName: ClusterOperator - Labels: {} - PluralName: clusteroperators - PrinterColumns: - - description: The version the operator is at. - jsonPath: .status.versions[?(@.name=="operator")].version - name: Version - type: string - - description: Whether the operator is running and stable. - jsonPath: .status.conditions[?(@.type=="Available")].status - name: Available - type: string - - description: Whether the operator is processing changes. - jsonPath: .status.conditions[?(@.type=="Progressing")].status - name: Progressing - type: string - - description: Whether the operator is degraded. - jsonPath: .status.conditions[?(@.type=="Degraded")].status - name: Degraded - type: string - - description: The time the operator's Available status last changed. 
- jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime - name: Since - type: date - Scope: Cluster - ShortNames: - - co - TopLevelFeatureGates: [] - Version: v1 - -clusterversions.config.openshift.io: - Annotations: - include.release.openshift.io/self-managed-high-availability: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/495 - CRDName: clusterversions.config.openshift.io - Capability: "" - Category: "" - FeatureGates: - - SignatureStores - FilenameOperatorName: cluster-version-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_00" - GroupName: config.openshift.io - HasStatus: true - KindName: ClusterVersion - Labels: {} - PluralName: clusterversions - PrinterColumns: - - jsonPath: .status.history[?(@.state=="Completed")].version - name: Version - type: string - - jsonPath: .status.conditions[?(@.type=="Available")].status - name: Available - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].status - name: Progressing - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime - name: Since - type: date - - jsonPath: .status.conditions[?(@.type=="Progressing")].message - name: Status - type: string - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -consoles.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: consoles.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: Console - Labels: {} - PluralName: consoles - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -dnses.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: dnses.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: DNS - Labels: {} - PluralName: dnses - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -featuregates.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: featuregates.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: FeatureGate - Labels: {} - PluralName: featuregates - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -images.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: images.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: Image - Labels: {} - PluralName: images - PrinterColumns: [] - Scope: Cluster - ShortNames: null - 
TopLevelFeatureGates: [] - Version: v1 - -imagecontentpolicies.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/874 - CRDName: imagecontentpolicies.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: ImageContentPolicy - Labels: {} - PluralName: imagecontentpolicies - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -imagedigestmirrorsets.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/1126 - CRDName: imagedigestmirrorsets.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: ImageDigestMirrorSet - Labels: {} - PluralName: imagedigestmirrorsets - PrinterColumns: [] - Scope: Cluster - ShortNames: - - idms - TopLevelFeatureGates: [] - Version: v1 - -imagetagmirrorsets.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/1126 - CRDName: imagetagmirrorsets.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: ImageTagMirrorSet - Labels: {} - PluralName: imagetagmirrorsets - PrinterColumns: [] - Scope: Cluster - ShortNames: - - itms - TopLevelFeatureGates: [] - Version: v1 - -infrastructures.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: infrastructures.config.openshift.io - Capability: "" - Category: "" - FeatureGates: - - BareMetalLoadBalancer - - GCPClusterHostedDNS - - GCPLabelsTags - - VSphereControlPlaneMachineSet - - VSphereMultiVCenters - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: Infrastructure - Labels: {} - PluralName: infrastructures - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -ingresses.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: ingresses.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: Ingress - Labels: {} - PluralName: ingresses - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -networks.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: networks.config.openshift.io - Capability: "" - Category: "" - FeatureGates: - - NetworkDiagnosticsConfig - - NetworkLiveMigration - FilenameOperatorName: config-operator - 
FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: false - KindName: Network - Labels: {} - PluralName: networks - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -nodes.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/1107 - CRDName: nodes.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: Node - Labels: {} - PluralName: nodes - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -oauths.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: oauths.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: OAuth - Labels: {} - PluralName: oauths - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -operatorhubs.config.openshift.io: - Annotations: {} - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: operatorhubs.config.openshift.io - Capability: marketplace - Category: "" - FeatureGates: [] - FilenameOperatorName: marketplace - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_03" - GroupName: config.openshift.io - HasStatus: true - KindName: OperatorHub - Labels: {} - PluralName: operatorhubs - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -projects.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: projects.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: Project - Labels: {} - PluralName: projects - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -proxies.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: proxies.config.openshift.io - Capability: "" - Category: "" - FeatureGates: [] - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_03" - GroupName: config.openshift.io - HasStatus: true - KindName: Proxy - Labels: {} - PluralName: proxies - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - -schedulers.config.openshift.io: - Annotations: - release.openshift.io/bootstrap-required: "true" - ApprovedPRNumber: https://github.com/openshift/api/pull/470 - CRDName: schedulers.config.openshift.io - Capability: "" - Category: "" - FeatureGates: - - DynamicResourceAllocation - FilenameOperatorName: config-operator - FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_10" - GroupName: config.openshift.io - HasStatus: true - KindName: Scheduler - Labels: {} - PluralName: 
schedulers - PrinterColumns: [] - Scope: Cluster - ShortNames: null - TopLevelFeatureGates: [] - Version: v1 - diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go deleted file mode 100644 index e5e9bdb8..00000000 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ /dev/null @@ -1,2600 +0,0 @@ -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_AdmissionConfig = map[string]string{ - "enabledPlugins": "enabledPlugins is a list of admission plugins that must be on in addition to the default list. Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon and can result in performance penalties and unexpected behavior.", - "disabledPlugins": "disabledPlugins is a list of admission plugins that must be off. Putting something in this list is almost always a mistake and likely to result in cluster instability.", -} - -func (AdmissionConfig) SwaggerDoc() map[string]string { - return map_AdmissionConfig -} - -var map_AdmissionPluginConfig = map[string]string{ - "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins", - "location": "Location is the path to a configuration file that contains the plugin's configuration", - "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", -} - -func (AdmissionPluginConfig) SwaggerDoc() map[string]string { - return map_AdmissionPluginConfig -} - -var map_AuditConfig = map[string]string{ - "": "AuditConfig holds configuration for the audit capabilities", - "enabled": "If this flag is set, audit log will be printed in the logs. The logs contains, method, user and a requested URL.", - "auditFilePath": "All requests coming to the apiserver will be logged to this file.", - "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.", - "maximumRetainedFiles": "Maximum number of old log files to retain.", - "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.", - "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.", - "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. 
If present, it will be used instead of the path to the policy file.", - "logFormat": "Format of saved audits (legacy or json).", - "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.", - "webHookMode": "Strategy for sending audit events (block or batch).", -} - -func (AuditConfig) SwaggerDoc() map[string]string { - return map_AuditConfig -} - -var map_CertInfo = map[string]string{ - "": "CertInfo relates a certificate with a private key", - "certFile": "CertFile is a file containing a PEM-encoded certificate", - "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", -} - -func (CertInfo) SwaggerDoc() map[string]string { - return map_CertInfo -} - -var map_ClientConnectionOverrides = map[string]string{ - "acceptContentTypes": "acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.", - "contentType": "contentType is the content type used when sending data to the server from this client.", - "qps": "qps controls the number of queries per second allowed for this connection.", - "burst": "burst allows extra queries to accumulate when a client is exceeding its rate.", -} - -func (ClientConnectionOverrides) SwaggerDoc() map[string]string { - return map_ClientConnectionOverrides -} - -var map_ConfigMapFileReference = map[string]string{ - "": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.", - "key": "Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.", -} - -func (ConfigMapFileReference) SwaggerDoc() map[string]string { - return map_ConfigMapFileReference -} - -var map_ConfigMapNameReference = map[string]string{ - "": "ConfigMapNameReference references a config map in a specific namespace. The namespace must be specified at the point of use.", - "name": "name is the metadata.name of the referenced config map", -} - -func (ConfigMapNameReference) SwaggerDoc() map[string]string { - return map_ConfigMapNameReference -} - -var map_DelegatedAuthentication = map[string]string{ - "": "DelegatedAuthentication allows authentication to be disabled.", - "disabled": "disabled indicates that authentication should be disabled. By default it will use delegated authentication.", -} - -func (DelegatedAuthentication) SwaggerDoc() map[string]string { - return map_DelegatedAuthentication -} - -var map_DelegatedAuthorization = map[string]string{ - "": "DelegatedAuthorization allows authorization to be disabled.", - "disabled": "disabled indicates that authorization should be disabled. By default it will use delegated authorization.", -} - -func (DelegatedAuthorization) SwaggerDoc() map[string]string { - return map_DelegatedAuthorization -} - -var map_EtcdConnectionInfo = map[string]string{ - "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server", - "urls": "URLs are the URLs for etcd", - "ca": "CA is a file containing trusted roots for the etcd server certificates", -} - -func (EtcdConnectionInfo) SwaggerDoc() map[string]string { - return map_EtcdConnectionInfo -} - -var map_EtcdStorageConfig = map[string]string{ - "storagePrefix": "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. 
This value, if changed, will mean existing objects in etcd will no longer be located.", -} - -func (EtcdStorageConfig) SwaggerDoc() map[string]string { - return map_EtcdStorageConfig -} - -var map_GenericAPIServerConfig = map[string]string{ - "": "GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd", - "servingInfo": "servingInfo describes how to start serving", - "corsAllowedOrigins": "corsAllowedOrigins", - "auditConfig": "auditConfig describes how to configure audit information", - "storageConfig": "storageConfig contains information about how to use", - "admission": "admissionConfig holds information about how to configure admission.", -} - -func (GenericAPIServerConfig) SwaggerDoc() map[string]string { - return map_GenericAPIServerConfig -} - -var map_GenericControllerConfig = map[string]string{ - "": "GenericControllerConfig provides information to configure a controller", - "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints", - "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need", - "authentication": "authentication allows configuration of authentication for the endpoints", - "authorization": "authorization allows configuration of authentication for the endpoints", -} - -func (GenericControllerConfig) SwaggerDoc() map[string]string { - return map_GenericControllerConfig -} - -var map_HTTPServingInfo = map[string]string{ - "": "HTTPServingInfo holds configuration for serving HTTP", - "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", - "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", -} - -func (HTTPServingInfo) SwaggerDoc() map[string]string { - return map_HTTPServingInfo -} - -var map_KubeClientConfig = map[string]string{ - "kubeConfig": "kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config", - "connectionOverrides": "connectionOverrides specifies client overrides for system components to loop back to this master.", -} - -func (KubeClientConfig) SwaggerDoc() map[string]string { - return map_KubeClientConfig -} - -var map_LeaderElection = map[string]string{ - "": "LeaderElection provides information to elect a leader", - "disable": "disable allows leader election to be suspended while allowing a fully defaulted \"normal\" startup case.", - "namespace": "namespace indicates which namespace the resource is in", - "name": "name indicates what name to use for the resource", - "leaseDuration": "leaseDuration is the duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.", - "renewDeadline": "renewDeadline is the interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.", - "retryPeriod": "retryPeriod is the duration the clients should wait between attempting acquisition and renewal of a leadership. 
This is only applicable if leader election is enabled.", -} - -func (LeaderElection) SwaggerDoc() map[string]string { - return map_LeaderElection -} - -var map_MaxAgePolicy = map[string]string{ - "": "MaxAgePolicy contains a numeric range for specifying a compliant HSTS max-age for the enclosing RequiredHSTSPolicy", - "largestMaxAge": "The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age This value can be left unspecified, in which case no upper limit is enforced.", - "smallestMaxAge": "The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary tool for administrators to quickly correct mistakes. This value can be left unspecified, in which case no lower limit is enforced.", -} - -func (MaxAgePolicy) SwaggerDoc() map[string]string { - return map_MaxAgePolicy -} - -var map_NamedCertificate = map[string]string{ - "": "NamedCertificate specifies a certificate/key, and the names it should be served for", - "names": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", -} - -func (NamedCertificate) SwaggerDoc() map[string]string { - return map_NamedCertificate -} - -var map_RemoteConnectionInfo = map[string]string{ - "": "RemoteConnectionInfo holds information necessary for establishing a remote connection", - "url": "URL is the remote URL to connect to", - "ca": "CA is the CA for verifying TLS connections", -} - -func (RemoteConnectionInfo) SwaggerDoc() map[string]string { - return map_RemoteConnectionInfo -} - -var map_RequiredHSTSPolicy = map[string]string{ - "namespaceSelector": "namespaceSelector specifies a label selector such that the policy applies only to those routes that are in namespaces with labels that match the selector, and are in one of the DomainPatterns. Defaults to the empty LabelSelector, which matches everything.", - "domainPatterns": "domainPatterns is a list of domains for which the desired HSTS annotations are required. If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy.\n\nThe use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*.", - "maxAge": "maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. If set to 0, it negates the effect, and hosts are removed as HSTS hosts. If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS policy will eventually expire on that client.", - "preloadPolicy": "preloadPolicy directs the client to include hosts in its host preload list so that it never needs to do an initial load to get the HSTS header (note that this is not defined in RFC 6797 and is therefore client implementation-dependent).", - "includeSubDomainsPolicy": "includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's domain name. 
Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com", -} - -func (RequiredHSTSPolicy) SwaggerDoc() map[string]string { - return map_RequiredHSTSPolicy -} - -var map_SecretNameReference = map[string]string{ - "": "SecretNameReference references a secret in a specific namespace. The namespace must be specified at the point of use.", - "name": "name is the metadata.name of the referenced secret", -} - -func (SecretNameReference) SwaggerDoc() map[string]string { - return map_SecretNameReference -} - -var map_ServingInfo = map[string]string{ - "": "ServingInfo holds information about serving web pages", - "bindAddress": "BindAddress is the ip:port to serve on", - "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", - "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", - "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", - "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", - "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", -} - -func (ServingInfo) SwaggerDoc() map[string]string { - return map_ServingInfo -} - -var map_StringSource = map[string]string{ - "": "StringSource allows specifying a string inline, or externally via env var or file. When it contains only a string value, it marshals to a simple JSON string.", -} - -func (StringSource) SwaggerDoc() map[string]string { - return map_StringSource -} - -var map_StringSourceSpec = map[string]string{ - "": "StringSourceSpec specifies a string value, or external location", - "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", - "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", - "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", - "keyFile": "KeyFile references a file containing the key to use to decrypt the value.", -} - -func (StringSourceSpec) SwaggerDoc() map[string]string { - return map_StringSourceSpec -} - -var map_APIServer = map[string]string{ - "": "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. 
They may not be overridden.", -} - -func (APIServer) SwaggerDoc() map[string]string { - return map_APIServer -} - -var map_APIServerEncryption = map[string]string{ - "type": "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices.\n\nWhen encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is:\n\n 1. secrets\n 2. configmaps\n 3. routes.route.openshift.io\n 4. oauthaccesstokens.oauth.openshift.io\n 5. oauthauthorizetokens.oauth.openshift.io", -} - -func (APIServerEncryption) SwaggerDoc() map[string]string { - return map_APIServerEncryption -} - -var map_APIServerList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (APIServerList) SwaggerDoc() map[string]string { - return map_APIServerList -} - -var map_APIServerNamedServingCert = map[string]string{ - "": "APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.", - "names": "names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names.", - "servingCertificate": "servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data[\"tls.key\"] - TLS private key. - Secret.Data[\"tls.crt\"] - TLS certificate.", -} - -func (APIServerNamedServingCert) SwaggerDoc() map[string]string { - return map_APIServerNamedServingCert -} - -var map_APIServerServingCerts = map[string]string{ - "namedCertificates": "namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used.", -} - -func (APIServerServingCerts) SwaggerDoc() map[string]string { - return map_APIServerServingCerts -} - -var map_APIServerSpec = map[string]string{ - "servingCerts": "servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.", - "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. 
The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.", - "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.", - "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.", - "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available minTLSVersion is VersionTLS12.", - "audit": "audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster.", -} - -func (APIServerSpec) SwaggerDoc() map[string]string { - return map_APIServerSpec -} - -var map_Audit = map[string]string{ - "profile": "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules.\n\nThe following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events\n (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody\n level).\n- WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nWarning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly.\n\nIf unset, the 'Default' profile is used as the default.", - "customRules": "customRules specify profiles per group. These profile take precedence over the top-level profile field if they apply. They are evaluation from top to bottom and the first one that matches, applies.", -} - -func (Audit) SwaggerDoc() map[string]string { - return map_Audit -} - -var map_AuditCustomRule = map[string]string{ - "": "AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile.", - "group": "group is a name of group a request user must be member of in order to this profile to apply.", - "profile": "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster.\n\nThe following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). 
- -var map_AuditCustomRule = map[string]string{ - "": "AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile.", - "group": "group is the name of a group that a request user must be a member of in order for this profile to apply.", - "profile": "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster.\n\nThe following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nIf unset, the 'Default' profile is used as the default.", -} - -func (AuditCustomRule) SwaggerDoc() map[string]string { - return map_AuditCustomRule -} - -var map_Authentication = map[string]string{ - "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", -} - -func (Authentication) SwaggerDoc() map[string]string { - return map_Authentication -} - -var map_AuthenticationList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (AuthenticationList) SwaggerDoc() map[string]string { - return map_AuthenticationList -} - -var map_AuthenticationSpec = map[string]string{ - "type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.", - "oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.", - "webhookTokenAuthenticators": "webhookTokenAuthenticators is DEPRECATED, setting it has no effect.", - "webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.\n\nCan only be set if \"Type\" is set to \"None\".", - "serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time.
This allows internal components to transition to use the new service account issuer without service disruption.", - "oidcProviders": "OIDCProviders are OIDC identity providers that can issue tokens for this cluster. Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.", -} - -func (AuthenticationSpec) SwaggerDoc() map[string]string { - return map_AuthenticationSpec -} - -var map_AuthenticationStatus = map[string]string{ - "integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.", - "oidcClients": "OIDCClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.", -} - -func (AuthenticationStatus) SwaggerDoc() map[string]string { - return map_AuthenticationStatus -} - -var map_DeprecatedWebhookTokenAuthenticator = map[string]string{ - "": "deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field.", - "kubeConfig": "kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.", -} - -func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { - return map_DeprecatedWebhookTokenAuthenticator -} - -var map_OIDCClientConfig = map[string]string{ - "componentName": "ComponentName is the name of the component that is supposed to consume this client configuration", - "componentNamespace": "ComponentNamespace is the namespace of the component that is supposed to consume this client configuration", - "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider", - "clientSecret": "ClientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field", - "extraScopes": "ExtraScopes is an optional set of scopes to request tokens with.", -} - -func (OIDCClientConfig) SwaggerDoc() map[string]string { - return map_OIDCClientConfig -}
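Every type in this generated file carries a SwaggerDoc method returning one of these maps, keyed by JSON field name, with the empty key (where present) holding the type-level description. A minimal sketch of how a consumer might look up field documentation, assuming only the shapes visible in this file and the openshift/api import path:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// SwaggerDoc is generated on the value receiver, so a zero value suffices.
	doc := configv1.OIDCClientConfig{}.SwaggerDoc()
	fmt.Println(doc["clientID"]) // per-field documentation string
	fmt.Println(doc[""])         // type-level description, when present
}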
- -var map_OIDCClientReference = map[string]string{ - "oidcProviderName": "OIDCName refers to the `name` of the provider from `oidcProviders`", - "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.", - "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider", -} - -func (OIDCClientReference) SwaggerDoc() map[string]string { - return map_OIDCClientReference -} - -var map_OIDCClientStatus = map[string]string{ - "componentName": "ComponentName is the name of the component that will consume a client configuration.", - "componentNamespace": "ComponentNamespace is the namespace of the component that will consume a client configuration.", - "currentOIDCClients": "CurrentOIDCClients is a list of clients that the component is currently using.", - "consumingUsers": "ConsumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.", - "conditions": "Conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", -} - -func (OIDCClientStatus) SwaggerDoc() map[string]string { - return map_OIDCClientStatus -} - -var map_OIDCProvider = map[string]string{ - "name": "Name of the OIDC provider", - "issuer": "Issuer describes attributes of the OIDC token issuer", - "oidcClients": "OIDCClients contains configuration for the platform's clients that need to request tokens from the issuer", - "claimMappings": "ClaimMappings describes rules on how to transform information from an ID token into a cluster identity", - "claimValidationRules": "ClaimValidationRules are rules that are applied to validate token claims to authenticate users.", -} - -func (OIDCProvider) SwaggerDoc() map[string]string { - return map_OIDCProvider -} - -var map_PrefixedClaimMapping = map[string]string{ - "prefix": "Prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", -} - -func (PrefixedClaimMapping) SwaggerDoc() map[string]string { - return map_PrefixedClaimMapping -} - -var map_TokenClaimMapping = map[string]string{ - "claim": "Claim is a JWT token claim to be used in the mapping", -} - -func (TokenClaimMapping) SwaggerDoc() map[string]string { - return map_TokenClaimMapping -} - -var map_TokenClaimMappings = map[string]string{ - "username": "Username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"", - "groups": "Groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values.", -} - -func (TokenClaimMappings) SwaggerDoc() map[string]string { - return map_TokenClaimMappings -} - -var map_TokenClaimValidationRule = map[string]string{ - "type": "Type sets the type of the validation rule", - "requiredClaim": "RequiredClaim allows configuring a required claim name and its expected value", -} - -func (TokenClaimValidationRule) SwaggerDoc() map[string]string { - return map_TokenClaimValidationRule -} - -var map_TokenIssuer = map[string]string{ - "issuerURL": "URL is the serving URL of the token issuer.
Must use the https:// scheme.", - "audiences": "Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.", - "issuerCertificateAuthority": "CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the \"ca-bundle.crt\" key. If unset, system trust is used instead.", -} - -func (TokenIssuer) SwaggerDoc() map[string]string { - return map_TokenIssuer -} - -var map_TokenRequiredClaim = map[string]string{ - "claim": "Claim is a name of a required claim. Only claims with string values are supported.", - "requiredValue": "RequiredValue is the required value for the claim.", -} - -func (TokenRequiredClaim) SwaggerDoc() map[string]string { - return map_TokenRequiredClaim -} - -var map_UsernameClaimMapping = map[string]string{ - "prefixPolicy": "PrefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"", -} - -func (UsernameClaimMapping) SwaggerDoc() map[string]string { - return map_UsernameClaimMapping -} - -var map_WebhookTokenAuthenticator = map[string]string{ - "": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator", - "kubeConfig": "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config.\n\nFor further details, see:\n\nhttps://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication\n\nThe key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored.", -} - -func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { - return map_WebhookTokenAuthenticator -} - -var map_Build = map[string]string{ - "": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec holds user-settable values for the build controller configuration", -} - -func (Build) SwaggerDoc() map[string]string { - return map_Build -} - -var map_BuildDefaults = map[string]string{ - "defaultProxy": "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.", - "gitProxy": "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.", - "env": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", - "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. A user can override a default label by providing a label with the same name in their Build/BuildConfig.", - "resources": "Resources defines resource requirements to execute the build.", -} - -func (BuildDefaults) SwaggerDoc() map[string]string { - return map_BuildDefaults -} - -var map_BuildList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (BuildList) SwaggerDoc() map[string]string { - return map_BuildList -} - -var map_BuildOverrides = map[string]string{ - "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If a user provides a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", - "nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node", - "tolerations": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", - "forcePull": "ForcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself", -} - -func (BuildOverrides) SwaggerDoc() map[string]string { - return map_BuildOverrides -} - -var map_BuildSpec = map[string]string{ - "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.", - "buildDefaults": "BuildDefaults controls the default information for Builds", - "buildOverrides": "BuildOverrides controls override settings for builds", -} - -func (BuildSpec) SwaggerDoc() map[string]string { - return map_BuildSpec -} - -var map_ImageLabel = map[string]string{ - "name": "Name defines the name of the label.
It must have non-zero length.", - "value": "Value defines the literal value of the label.", -} - -func (ImageLabel) SwaggerDoc() map[string]string { - return map_ImageLabel -} - -var map_ClusterOperator = map[string]string{ - "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds configuration that could apply to any operator.", - "status": "status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem.", -} - -func (ClusterOperator) SwaggerDoc() map[string]string { - return map_ClusterOperator -} - -var map_ClusterOperatorList = map[string]string{ - "": "ClusterOperatorList is a list of OperatorStatus resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (ClusterOperatorList) SwaggerDoc() map[string]string { - return map_ClusterOperatorList -} - -var map_ClusterOperatorSpec = map[string]string{ - "": "ClusterOperatorSpec is empty for now, but you could imagine holding information like \"pause\".", -} - -func (ClusterOperatorSpec) SwaggerDoc() map[string]string { - return map_ClusterOperatorSpec -} - -var map_ClusterOperatorStatus = map[string]string{ - "": "ClusterOperatorStatus provides information about the status of the operator.", - "conditions": "conditions describes the state of the operator's managed and monitored components.", - "versions": "versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name \"operator\". An operator reports a new \"operator\" version when it has rolled out the new version to all of its operands.", - "relatedObjects": "relatedObjects is a list of objects that are \"interesting\" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces", - "extension": "extension contains any additional status information specific to the operator which owns this status object.", -} - -func (ClusterOperatorStatus) SwaggerDoc() map[string]string { - return map_ClusterOperatorStatus -} - -var map_ClusterOperatorStatusCondition = map[string]string{ - "": "ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components.", - "type": "type specifies the aspect reported by this condition.", - "status": "status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.", - "reason": "reason is the CamelCase reason for the condition's current status.", - "message": "message provides additional information about the current condition. This is only to be consumed by humans. 
It may contain Line Feed characters (U+000A), which should be rendered as new lines.", -} - -func (ClusterOperatorStatusCondition) SwaggerDoc() map[string]string { - return map_ClusterOperatorStatusCondition -} - -var map_ObjectReference = map[string]string{ - "": "ObjectReference contains enough information to let you inspect or modify the referred object.", - "group": "group of the referent.", - "resource": "resource of the referent.", - "namespace": "namespace of the referent.", - "name": "name of the referent.", -} - -func (ObjectReference) SwaggerDoc() map[string]string { - return map_ObjectReference -} - -var map_OperandVersion = map[string]string{ - "name": "name is the name of the particular operand this version is for. It usually matches container images, not operators.", - "version": "version indicates which version of a particular operand is currently being managed. It must always match the Available operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out 1.1.0", -} - -func (OperandVersion) SwaggerDoc() map[string]string { - return map_OperandVersion -} - -var map_ClusterCondition = map[string]string{ - "": "ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate.", - "type": "type represents the cluster-condition type. This defines the members and semantics of any additional properties.", - "promql": "promQL represents a cluster condition based on PromQL.", -} - -func (ClusterCondition) SwaggerDoc() map[string]string { - return map_ClusterCondition -} - -var map_ClusterVersion = map[string]string{ - "": "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster.", - "status": "status contains information about the available updates and any in-progress updates.", -} - -func (ClusterVersion) SwaggerDoc() map[string]string { - return map_ClusterVersion -} - -var map_ClusterVersionCapabilitiesSpec = map[string]string{ - "": "ClusterVersionCapabilitiesSpec selects the managed set of optional, core cluster components.", - "baselineCapabilitySet": "baselineCapabilitySet selects an initial set of optional capabilities to enable, which can be extended via additionalEnabledCapabilities. If unset, the cluster will choose a default, and the default may change over time. The current default is vCurrent.", - "additionalEnabledCapabilities": "additionalEnabledCapabilities extends the set of managed capabilities beyond the baseline defined in baselineCapabilitySet.
The default is an empty set.", -} - -func (ClusterVersionCapabilitiesSpec) SwaggerDoc() map[string]string { - return map_ClusterVersionCapabilitiesSpec -} - -var map_ClusterVersionCapabilitiesStatus = map[string]string{ - "": "ClusterVersionCapabilitiesStatus describes the state of optional, core cluster components.", - "enabledCapabilities": "enabledCapabilities lists all the capabilities that are currently managed.", - "knownCapabilities": "knownCapabilities lists all the capabilities known to the current cluster.", -} - -func (ClusterVersionCapabilitiesStatus) SwaggerDoc() map[string]string { - return map_ClusterVersionCapabilitiesStatus -} - -var map_ClusterVersionList = map[string]string{ - "": "ClusterVersionList is a list of ClusterVersion resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (ClusterVersionList) SwaggerDoc() map[string]string { - return map_ClusterVersionList -} - -var map_ClusterVersionSpec = map[string]string{ - "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.", - "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.", - "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.", - "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.", - "channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. 
The default channel will contain stable updates that are appropriate for production clusters.", - "capabilities": "capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics.", - "signatureStores": "signatureStores contains the upstream URIs to verify release signatures and optional reference to a config map by name containing the PEM-encoded CA bundle.\n\nBy default, CVO will use existing signature stores if this property is empty. The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature in these stores in parallel only when local ConfigMaps did not include a valid signature. Validation will fail if none of the signature stores reply with valid signature before timeout. Setting signatureStores will replace the default signature stores with custom signature stores. Default stores can be used with custom signature stores by adding them manually.\n\nA maximum of 32 signature stores may be configured.", - "overrides": "overrides is a list of overrides for components that are managed by the cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.", -} - -func (ClusterVersionSpec) SwaggerDoc() map[string]string { - return map_ClusterVersionSpec -} - -var map_ClusterVersionStatus = map[string]string{ - "": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.", - "desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.", - "history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.", - "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.", - "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.", - "capabilities": "capabilities describes the state of optional, core cluster components.", - "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", - "availableUpdates": "availableUpdates contains updates recommended for this cluster.
Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.", - "conditionalUpdates": "conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified.", -} - -func (ClusterVersionStatus) SwaggerDoc() map[string]string { - return map_ClusterVersionStatus -} - -var map_ComponentOverride = map[string]string{ - "": "ComponentOverride allows overriding cluster version operator's behavior for a component.", - "kind": "kind identifies which object to override.", - "group": "group identifies the API group that the kind is in.", - "namespace": "namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty.", - "name": "name is the component's name.", - "unmanaged": "unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false", -} - -func (ComponentOverride) SwaggerDoc() map[string]string { - return map_ComponentOverride -} - -var map_ConditionalUpdate = map[string]string{ - "": "ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster.", - "release": "release is the target of the update.", - "risks": "risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update.", - "conditions": "conditions represents the observations of the conditional update's current status. Known types are: * Recommended, for whether the update is recommended for the current cluster.", -} - -func (ConditionalUpdate) SwaggerDoc() map[string]string { - return map_ConditionalUpdate -} - -var map_ConditionalUpdateRisk = map[string]string{ - "": "ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update.", - "url": "url contains information about this risk.", - "name": "name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state.", - "message": "message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.", - "matchingRules": "matchingRules is a slice of conditions for deciding which clusters match the risk and which do not. The slice is ordered by decreasing precedence. The cluster-version operator will walk the slice in order, and stop after the first it can successfully evaluate.
If no condition can be successfully evaluated, the update will not be recommended.", -} - -func (ConditionalUpdateRisk) SwaggerDoc() map[string]string { - return map_ConditionalUpdateRisk -} - -var map_PromQLClusterCondition = map[string]string{ - "": "PromQLClusterCondition represents a cluster condition based on PromQL.", - "promql": "PromQL is a PromQL query classifying clusters. This query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.", -} - -func (PromQLClusterCondition) SwaggerDoc() map[string]string { - return map_PromQLClusterCondition -} - -var map_Release = map[string]string{ - "": "Release represents an OpenShift release image and associated metadata.", - "version": "version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified.", - "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.", - "url": "url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases.", - "channels": "channels is the set of Cincinnati channels to which the release currently belongs.", -} - -func (Release) SwaggerDoc() map[string]string { - return map_Release -} - -var map_SignatureStore = map[string]string{ - "": "SignatureStore represents the URL of a custom signature store.", - "url": "url contains the upstream custom signature store URL. url should be a valid absolute http/https URI of an upstream signature store as per rfc1738. This must be provided and cannot be empty.", - "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the signature store is not honored. If the specified ca data is not valid, the signature store is not honored. If empty, we fall back to the CA configured via Proxy, which is appended to the default system roots. The namespace for this config map is openshift-config.", -} - -func (SignatureStore) SwaggerDoc() map[string]string { - return map_SignatureStore -}
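A hypothetical sketch of the SignatureStore shape documented just above (not part of this diff; the URL and ConfigMap name are invented, and the referenced ConfigMap would have to exist in openshift-config with a "ca.crt" key):

package example

import (
	configv1 "github.com/openshift/api/config/v1"
)

// exampleSignatureStore points release-signature verification at a custom
// store fronted by an internal CA, per the url/ca semantics described above.
func exampleSignatureStore() configv1.SignatureStore {
	return configv1.SignatureStore{
		URL: "https://mirror.example.com/signatures",
		CA:  configv1.ConfigMapNameReference{Name: "signature-store-ca"},
	}
}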
- -var map_Update = map[string]string{ - "": "Update represents an administrator update request.", - "architecture": "architecture is an optional field that indicates the desired value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. architecture can only be set to Multi thereby only allowing updates from single to multi architecture. If architecture is set, image cannot be set and version must be set. Valid values are 'Multi' and empty.", - "version": "version is a semantic version identifying the update version. version is ignored if image is specified and required if architecture is specified.", - "image": "image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, version is ignored. When image is set, version should be empty. When image is set, architecture cannot be specified.", - "force": "force allows an administrator to update to an image that has failed verification or upgradeable checks. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that come from unknown or potentially malicious sources.", -} - -func (Update) SwaggerDoc() map[string]string { - return map_Update -} - -var map_UpdateHistory = map[string]string{ - "": "UpdateHistory is a single attempted update to the cluster.", - "state": "state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied).", - "startedTime": "startedTime is the time at which the update was started.", - "completionTime": "completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update).", - "version": "version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.", - "image": "image is a container image location that contains the update. This value is always populated.", - "verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted.", - "acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may mention an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.", -} - -func (UpdateHistory) SwaggerDoc() map[string]string { - return map_UpdateHistory -} - -var map_Console = map[string]string{ - "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", -} - -func (Console) SwaggerDoc() map[string]string { - return map_Console -} - -var map_ConsoleAuthentication = map[string]string{ - "": "ConsoleAuthentication defines a list of optional configuration for console authentication.", - "logoutRedirect": "An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page.
This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user's token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.", -} - -func (ConsoleAuthentication) SwaggerDoc() map[string]string { - return map_ConsoleAuthentication -} - -var map_ConsoleList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (ConsoleList) SwaggerDoc() map[string]string { - return map_ConsoleList -} - -var map_ConsoleSpec = map[string]string{ - "": "ConsoleSpec is the specification of the desired behavior of the Console.", -} - -func (ConsoleSpec) SwaggerDoc() map[string]string { - return map_ConsoleSpec -} - -var map_ConsoleStatus = map[string]string{ - "": "ConsoleStatus defines the observed status of the Console.", - "consoleURL": "The URL for the console. This will be derived from the host for the route that is created for the console.", -} - -func (ConsoleStatus) SwaggerDoc() map[string]string { - return map_ConsoleStatus -} - -var map_AWSDNSSpec = map[string]string{ - "": "AWSDNSSpec contains DNS configuration specific to the Amazon Web Services cloud provider.", - "privateZoneIAMRole": "privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed.", -} - -func (AWSDNSSpec) SwaggerDoc() map[string]string { - return map_AWSDNSSpec -} - -var map_DNS = map[string]string{ - "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", -} - -func (DNS) SwaggerDoc() map[string]string { - return map_DNS -} - -var map_DNSList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (DNSList) SwaggerDoc() map[string]string { - return map_DNSList -} - -var map_DNSPlatformSpec = map[string]string{ - "": "DNSPlatformSpec holds cloud-provider-specific configuration for DNS administration.", - "type": "type is the underlying infrastructure provider for the cluster. 
Allowed values: \"\", \"AWS\".\n\nIndividual components may not support all platforms, and must handle unrecognized platforms with best-effort defaults.", - "aws": "aws contains DNS configuration specific to the Amazon Web Services cloud provider.", -} - -func (DNSPlatformSpec) SwaggerDoc() map[string]string { - return map_DNSPlatformSpec -} - -var map_DNSSpec = map[string]string{ - "baseDomain": "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.", - "publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.", - "privateZone": "privateZone is the location where all the DNS records that are only available internally to the cluster exist.\n\nIf this field is nil, no private records should be created.\n\nOnce set, this field cannot be changed.", - "platform": "platform holds configuration specific to the underlying infrastructure provider for DNS. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time.", -} - -func (DNSSpec) SwaggerDoc() map[string]string { - return map_DNSSpec -} - -var map_DNSZone = map[string]string{ - "": "DNSZone is used to define a DNS hosted zone. A zone can be identified by an ID or tags.", - "id": "id is the identifier that can be used to find the DNS hosted zone.\n\non AWS, a zone can be fetched using `ID` as id in [1]; on Azure, a zone can be fetched using `ID` as a pre-determined name in [2]; on GCP, a zone can be fetched using `ID` as a pre-determined name in [3].\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get", - "tags": "tags can be used to query the DNS hosted zone.\n\non AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters,\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options", -} - -func (DNSZone) SwaggerDoc() map[string]string { - return map_DNSZone -} - -var map_CustomFeatureGates = map[string]string{ - "enabled": "enabled is a list of all feature gates that you want to force on", - "disabled": "disabled is a list of all feature gates that you want to force off", -} - -func (CustomFeatureGates) SwaggerDoc() map[string]string { - return map_CustomFeatureGates -}
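A hypothetical sketch of the CustomFeatureGates selection documented above, together with the FeatureGateSelection entry below (the gate names are invented; as the docs below warn, this feature set is unsupported, cannot be undone, and prevents upgrades):

package example

import (
	configv1 "github.com/openshift/api/config/v1"
)

// exampleCustomGates forces one gate on and one off via the CustomNoUpgrade
// feature set; FeatureGateSelection is inlined into FeatureGateSpec.
func exampleCustomGates() configv1.FeatureGateSpec {
	return configv1.FeatureGateSpec{
		FeatureGateSelection: configv1.FeatureGateSelection{
			FeatureSet: configv1.CustomNoUpgrade,
			CustomNoUpgrade: &configv1.CustomFeatureGates{
				Enabled:  []configv1.FeatureGateName{"ExampleGateA"},
				Disabled: []configv1.FeatureGateName{"ExampleGateB"},
			},
		},
	}
}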
- -var map_FeatureGate = map[string]string{ - "": "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", -} - -func (FeatureGate) SwaggerDoc() map[string]string { - return map_FeatureGate -} - -var map_FeatureGateAttributes = map[string]string{ - "name": "name is the name of the FeatureGate.", -} - -func (FeatureGateAttributes) SwaggerDoc() map[string]string { - return map_FeatureGateAttributes -} - -var map_FeatureGateDetails = map[string]string{ - "version": "version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field.", - "enabled": "enabled is a list of all feature gates that are enabled in the cluster for the named version.", - "disabled": "disabled is a list of all feature gates that are disabled in the cluster for the named version.", -} - -func (FeatureGateDetails) SwaggerDoc() map[string]string { - return map_FeatureGateDetails -} - -var map_FeatureGateList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (FeatureGateList) SwaggerDoc() map[string]string { - return map_FeatureGateList -} - -var map_FeatureGateSelection = map[string]string{ - "featureSet": "featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone.", - "customNoUpgrade": "customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal \"CustomNoUpgrade\" to use this field.", -} - -func (FeatureGateSelection) SwaggerDoc() map[string]string { - return map_FeatureGateSelection -} - -var map_FeatureGateStatus = map[string]string{ - "conditions": "conditions represent the observations of the current state. Known .status.conditions.type are: \"DeterminationDegraded\"", - "featureGates": "featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion. Operators other than the CVO and cluster-config-operator must read the .status.featureGates, locate the version they are managing, find the enabled/disabled featuregates and make the operand and operator match. The enabled/disabled values for a particular version may change during the life of the cluster as various .spec.featureSet values are selected. Operators may choose to restart their processes to pick up these changes, but remembering past enable/disable lists is beyond the scope of this API and is the responsibility of individual operators. Only featureGates with .version in the ClusterVersion.status will be present in this list.", -} - -func (FeatureGateStatus) SwaggerDoc() map[string]string { - return map_FeatureGateStatus -} - -var map_Image = map[string]string{ - "": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames.
When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", -} - -func (Image) SwaggerDoc() map[string]string { - return map_Image -} - -var map_ImageList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (ImageList) SwaggerDoc() map[string]string { - return map_ImageList -} - -var map_ImageSpec = map[string]string{ - "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", - "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", - "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.", - "registrySources": "registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.", -} - -func (ImageSpec) SwaggerDoc() map[string]string { - return map_ImageSpec -} - -var map_ImageStatus = map[string]string{ - "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. This value is set by the image registry operator which controls the internal registry hostname.", - "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", -} - -func (ImageStatus) SwaggerDoc() map[string]string { - return map_ImageStatus -} - -var map_RegistryLocation = map[string]string{ - "": "RegistryLocation contains a location of the registry specified by the registry domain name. 
The domain name might include wildcards, like '*' or '??'.", - "domainName": "domainName specifies a domain name for the registry. If the registry uses a non-standard port (one other than 80 or 443), the port should be included in the domain name as well.", - "insecure": "insecure indicates whether the registry is secure (https) or insecure (http). By default (if not specified) the registry is assumed to be secure.", -} - -func (RegistryLocation) SwaggerDoc() map[string]string { - return map_RegistryLocation -} - -var map_RegistrySources = map[string]string{ - "": "RegistrySources holds cluster-wide information about how to handle the registries config.", - "insecureRegistries": "insecureRegistries are registries which do not have valid TLS certificates or only support HTTP connections.", - "blockedRegistries": "blockedRegistries cannot be used for image pull and push actions. All other registries are permitted.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", - "allowedRegistries": "allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", - "containerRuntimeSearchRegistries": "containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified domains in their pull specs. Registries will be searched in the order provided in the list. Note: this search list only works with the container runtime, i.e. CRI-O. It will NOT work with builds or imagestream imports.", -} - -func (RegistrySources) SwaggerDoc() map[string]string { - return map_RegistrySources -} - -var map_ImageContentPolicy = map[string]string{ - "": "ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds user settable values for configuration", -} - -func (ImageContentPolicy) SwaggerDoc() map[string]string { - return map_ImageContentPolicy -} - -var map_ImageContentPolicyList = map[string]string{ - "": "ImageContentPolicyList lists the items in the ImageContentPolicy CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (ImageContentPolicyList) SwaggerDoc() map[string]string { - return map_ImageContentPolicyList -}
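A hypothetical sketch of the repositoryDigestMirrors entry documented below (the source and mirror repositories are invented for illustration):

package example

import (
	configv1 "github.com/openshift/api/config/v1"
)

// exampleMirror redirects digest-based pulls of one source repository to a
// mirror; tag-based pulls would additionally require AllowMirrorByTags.
func exampleMirror() configv1.RepositoryDigestMirrors {
	return configv1.RepositoryDigestMirrors{
		Source:  "registry.example.com/team/app",
		Mirrors: []configv1.Mirror{"mirror.example.com/team/app"},
	}
}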
To pull images from mirrors by tag, \"allowMirrorByTags\" should be set to true.\n\nEach “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.", -} - -func (ImageContentPolicySpec) SwaggerDoc() map[string]string { - return map_ImageContentPolicySpec -} - -var map_RepositoryDigestMirrors = map[string]string{ - "": "RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.", - "source": "source is the repository that users refer to, e.g. in image pull specifications.", - "allowMirrorByTags": "allowMirrorByTags: if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false: the mirrors only work when pulling the images that are referenced by their digests. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Forcing digest-pulls for mirrors avoids that issue.", - "mirrors": "mirrors is zero or more repositories that may also contain the same images. If \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec and no mirror will be configured. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering.", -} - -func (RepositoryDigestMirrors) SwaggerDoc() map[string]string { - return map_RepositoryDigestMirrors -} - -var map_ImageDigestMirrorSet = map[string]string{ - "": "ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules when using digest pull specifications. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds user settable values for configuration", - "status": "status contains the observed state of the resource.", -} - -func (ImageDigestMirrorSet) SwaggerDoc() map[string]string { - return map_ImageDigestMirrorSet -} - -var map_ImageDigestMirrorSetList = map[string]string{ - "": "ImageDigestMirrorSetList lists the items in the ImageDigestMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata.
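The merge rule spelled out above (policy A `a, b, c` plus policy B `c, d, e` gives `a, b, c, d, e`) amounts to an order-preserving union. A minimal Go sketch of that behavior, not the actual cluster implementation:

```go
package main

import "fmt"

// mergeMirrors approximates the documented merge: walk the policies in
// order, append each mirror the first time it is seen, and keep relative
// order. When two policies disagree on order the spec leaves the result
// unspecified; this sketch simply keeps the order of first appearance.
func mergeMirrors(policies ...[]string) []string {
	seen := make(map[string]bool)
	var merged []string
	for _, mirrors := range policies {
		for _, m := range mirrors {
			if !seen[m] {
				seen[m] = true
				merged = append(merged, m)
			}
		}
	}
	return merged
}

func main() {
	a := []string{"a", "b", "c"}
	b := []string{"c", "d", "e"}
	fmt.Println(mergeMirrors(a, b)) // [a b c d e], matching the documented example
}
```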
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (ImageDigestMirrorSetList) SwaggerDoc() map[string]string { - return map_ImageDigestMirrorSetList -} - -var map_ImageDigestMirrorSetSpec = map[string]string{ - "": "ImageDigestMirrorSetSpec is the specification of the ImageDigestMirrorSet CRD.", - "imageDigestMirrors": "imageDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository, allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using tag specification, users should configure a list of mirrors using the \"ImageTagMirrorSet\" CRD.\n\nIf the image pull specification matches the repository of \"source\" in multiple imagedigestmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. Users who want to use a specific order of mirrors should configure them into one list of mirrors using the expected order.", -} - -func (ImageDigestMirrorSetSpec) SwaggerDoc() map[string]string { - return map_ImageDigestMirrorSetSpec -} - -var map_ImageDigestMirrors = map[string]string{ - "": "ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.", - "source": "source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname, e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry. \"source\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host. For more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", - "mirrors": "mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their digests. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g.
for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by \"mirrorSourcePolicy\". Other cluster configuration, including (but not limited to) other imageDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. \"mirrors\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo. For more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", - "mirrorSourcePolicy": "mirrorSourcePolicy defines the fallback policy if image pulls from the mirrors fail. If unset, the image will continue to be pulled from the repository in the pull spec. mirrorSourcePolicy is valid configuration only when one or more mirrors are in the mirror list.", -} - -func (ImageDigestMirrors) SwaggerDoc() map[string]string { - return map_ImageDigestMirrors -} - -var map_ImageTagMirrorSet = map[string]string{ - "": "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules when using tag pull specifications. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds user settable values for configuration", - "status": "status contains the observed state of the resource.", -} - -func (ImageTagMirrorSet) SwaggerDoc() map[string]string { - return map_ImageTagMirrorSet -} - -var map_ImageTagMirrorSetList = map[string]string{ - "": "ImageTagMirrorSetList lists the items in the ImageTagMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (ImageTagMirrorSetList) SwaggerDoc() map[string]string { - return map_ImageTagMirrorSetList -} - -var map_ImageTagMirrorSetSpec = map[string]string{ - "": "ImageTagMirrorSetSpec is the specification of the ImageTagMirrorSet CRD.", - "imageTagMirrors": "imageTagMirrors allows images referenced by image tags in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageTagMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository, allowing administrators to choose a potentially faster mirror.
To use mirrors to pull images using digest specification only, users should configure a list of mirrors using the \"ImageDigestMirrorSet\" CRD.\n\nIf the image pull specification matches the repository of \"source\" in multiple imagetagmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. Users who want to use a deterministic order of mirrors should configure them into one list of mirrors using the expected order.", -} - -func (ImageTagMirrorSetSpec) SwaggerDoc() map[string]string { - return map_ImageTagMirrorSetSpec -} - -var map_ImageTagMirrors = map[string]string{ - "": "ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config.", - "source": "source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname, e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry. \"source\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host. For more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", - "mirrors": "mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their tags. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Configuring a list of mirrors using the \"ImageDigestMirrorSet\" CRD and forcing digest-pulls for mirrors avoids that issue. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by \"mirrorSourcePolicy\".
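Two rules recur in the ImageDigestMirrors/ImageTagMirrors docs above: the most specific \"source\" match wins, and a mirror location is derived by swapping the matched source prefix for the mirror entry. A rough Go sketch under simplifying assumptions (plain prefix matching only; wildcard `[*.]host` sources are not handled):

```go
package main

import (
	"fmt"
	"strings"
)

// bestSource picks the most specific source matching the pull spec: the
// longest source that equals the spec or is a path-segment prefix of it.
func bestSource(pullSpec string, sources []string) string {
	best := ""
	for _, s := range sources {
		if (pullSpec == s || strings.HasPrefix(pullSpec, s+"/")) && len(s) > len(best) {
			best = s
		}
	}
	return best
}

// rewrite replaces the matched source prefix with a mirror location,
// e.g. registry.redhat.io/product/repo -> mirror.local/redhat/product/repo.
func rewrite(pullSpec, source, mirror string) string {
	return mirror + strings.TrimPrefix(pullSpec, source)
}

func main() {
	sources := []string{"quay.io/libpod", "quay.io/libpod/busybox"}
	// The documented example: the more specific source wins.
	fmt.Println(bestSource("quay.io/libpod/busybox", sources)) // quay.io/libpod/busybox
	fmt.Println(rewrite("registry.redhat.io/product/repo",
		"registry.redhat.io", "mirror.local/redhat")) // mirror.local/redhat/product/repo
}
```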
Other cluster configuration, including (but not limited to) other imageTagMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. \"mirrors\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo. For more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", - "mirrorSourcePolicy": "mirrorSourcePolicy defines the fallback policy if image pulls from the mirrors fail. If unset, the image will continue to be pulled from the repository in the pull spec. mirrorSourcePolicy is valid configuration only when one or more mirrors are in the mirror list.", -} - -func (ImageTagMirrors) SwaggerDoc() map[string]string { - return map_ImageTagMirrors -} - -var map_AWSPlatformSpec = map[string]string{ - "": "AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider. This only includes fields that can be modified in the cluster.", - "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override the default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", -} - -func (AWSPlatformSpec) SwaggerDoc() map[string]string { - return map_AWSPlatformSpec -} - -var map_AWSPlatformStatus = map[string]string{ - "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.", - "region": "region holds the default AWS region for new AWS resources created by the cluster.", - "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override the default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", - "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", -} - -func (AWSPlatformStatus) SwaggerDoc() map[string]string { - return map_AWSPlatformStatus -} - -var map_AWSResourceTag = map[string]string{ - "": "AWSResourceTag is a tag to apply to AWS resources created for the cluster.", - "key": "key is the key of the tag.", - "value": "value is the value of the tag. Some AWS services do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.", -} - -func (AWSResourceTag) SwaggerDoc() map[string]string { - return map_AWSResourceTag -} - -var map_AWSServiceEndpoint = map[string]string{ - "": "AWSServiceEndpoint stores the configuration of a custom url to override existing defaults of AWS Services.", - "name": "name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html. This must be provided and cannot be empty.", - "url": "url is a fully qualified URI with scheme https that overrides the default generated endpoint for a client.
This must be provided and cannot be empty.", -} - -func (AWSServiceEndpoint) SwaggerDoc() map[string]string { - return map_AWSServiceEndpoint -} - -var map_AlibabaCloudPlatformSpec = map[string]string{ - "": "AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider. This only includes fields that can be modified in the cluster.", -} - -func (AlibabaCloudPlatformSpec) SwaggerDoc() map[string]string { - return map_AlibabaCloudPlatformSpec -} - -var map_AlibabaCloudPlatformStatus = map[string]string{ - "": "AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider.", - "region": "region specifies the region for Alibaba Cloud resources created for the cluster.", - "resourceGroupID": "resourceGroupID is the ID of the resource group for the cluster.", - "resourceTags": "resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster.", -} - -func (AlibabaCloudPlatformStatus) SwaggerDoc() map[string]string { - return map_AlibabaCloudPlatformStatus -} - -var map_AlibabaCloudResourceTag = map[string]string{ - "": "AlibabaCloudResourceTag is the set of tags to apply to resources.", - "key": "key is the key of the tag.", - "value": "value is the value of the tag.", -} - -func (AlibabaCloudResourceTag) SwaggerDoc() map[string]string { - return map_AlibabaCloudResourceTag -} - -var map_AzurePlatformSpec = map[string]string{ - "": "AzurePlatformSpec holds the desired state of the Azure infrastructure provider. This only includes fields that can be modified in the cluster.", -} - -func (AzurePlatformSpec) SwaggerDoc() map[string]string { - return map_AzurePlatformSpec -} - -var map_AzurePlatformStatus = map[string]string{ - "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.", - "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.", - "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is the same as ResourceGroupName.", - "cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`.", - "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.", - "resourceTags": "resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, and DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.", -} - -func (AzurePlatformStatus) SwaggerDoc() map[string]string { - return map_AzurePlatformStatus -} - -var map_AzureResourceTag = map[string]string{ - "": "AzureResourceTag is a tag to apply to Azure resources created for the cluster.", - "key": "key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric characters and the following special characters `_ . -`.", - "value": "value is the value part of the tag.
A tag value can have a maximum of 256 characters and cannot be empty. Value must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`.", -} - -func (AzureResourceTag) SwaggerDoc() map[string]string { - return map_AzureResourceTag -} - -var map_BareMetalPlatformLoadBalancer = map[string]string{ - "": "BareMetalPlatformLoadBalancer defines the load balancer used by the cluster on BareMetal platform.", - "type": "type defines the type of load balancer used by the cluster on BareMetal platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", -} - -func (BareMetalPlatformLoadBalancer) SwaggerDoc() map[string]string { - return map_BareMetalPlatformLoadBalancer -} - -var map_BareMetalPlatformSpec = map[string]string{ - "": "BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. This only includes fields that can be modified in the cluster.", - "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can).", - "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can).", - "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example \"10.0.0.0/8\" or \"fd00::/8\".", -} - -func (BareMetalPlatformSpec) SwaggerDoc() map[string]string { - return map_BareMetalPlatformSpec -} - -var map_BareMetalPlatformStatus = map[string]string{ - "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. For more information about the network architecture used with the BareMetal platform type, see: https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md", - "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. 
It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.", - "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.", - "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", - "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", - "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", - "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", - "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.", -} - -func (BareMetalPlatformStatus) SwaggerDoc() map[string]string { - return map_BareMetalPlatformStatus -} - -var map_CloudControllerManagerStatus = map[string]string{ - "": "CloudControllerManagerStatus holds the state of Cloud Controller Manager (a.k.a. CCM or CPI) related settings", - "state": "state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager\n\nValid values are \"External\", \"None\" and omitted. When set to \"External\", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to \"None\", new nodes will not be tainted and no extra initialization from the cloud controller manager is expected.", -} - -func (CloudControllerManagerStatus) SwaggerDoc() map[string]string { - return map_CloudControllerManagerStatus -} - -var map_CloudLoadBalancerConfig = map[string]string{ - "": "CloudLoadBalancerConfig contains a union discriminator indicating the type of DNS solution in use within the cluster. When the DNSType is `ClusterHosted`, the cloud's Load Balancer configuration needs to be provided so that the DNS solution hosted within the cluster can be configured with those values.", - "dnsType": "dnsType indicates the type of DNS solution in use within the cluster. Its default value of `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform. It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode, the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. The cluster's use of the cloud's Load Balancers is unaffected by this setting. The value is immutable after it has been set at install time.
Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. Enabling this functionality allows the user to start their own DNS solution outside the cluster after installation is complete. The customer would be responsible for configuring this custom DNS solution, and it can be run in addition to the in-cluster DNS solution.", - "clusterHosted": "clusterHosted holds the IP addresses of API, API-Int and Ingress Load Balancers on Cloud Platforms. The DNS solution hosted within the cluster uses these IP addresses to provide resolution for API, API-Int and Ingress services.", -} - -func (CloudLoadBalancerConfig) SwaggerDoc() map[string]string { - return map_CloudLoadBalancerConfig -} - -var map_CloudLoadBalancerIPs = map[string]string{ - "": "CloudLoadBalancerIPs contains the Load Balancer IPs for the cloud's API, API-Int and Ingress Load balancers. They will be populated as soon as the respective Load Balancers have been configured. These values are utilized to configure the DNS solution hosted within the cluster.", - "apiIntLoadBalancerIPs": "apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Entries in the apiIntLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted.", - "apiLoadBalancerIPs": "apiLoadBalancerIPs holds Load Balancer IPs for the API service. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Could be empty for private clusters. Entries in the apiLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted.", - "ingressLoadBalancerIPs": "ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Entries in the ingressLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted.", -} - -func (CloudLoadBalancerIPs) SwaggerDoc() map[string]string { - return map_CloudLoadBalancerIPs -} - -var map_EquinixMetalPlatformSpec = map[string]string{ - "": "EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider. This only includes fields that can be modified in the cluster.", -} - -func (EquinixMetalPlatformSpec) SwaggerDoc() map[string]string { - return map_EquinixMetalPlatformSpec -} - -var map_EquinixMetalPlatformStatus = map[string]string{ - "": "EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider.", - "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", - "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", -} - -func (EquinixMetalPlatformStatus) SwaggerDoc() map[string]string { - return map_EquinixMetalPlatformStatus -} - -var map_ExternalPlatformSpec = map[string]string{ - "": "ExternalPlatformSpec holds the desired state for the generic External infrastructure provider.", - "platformName": "PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time.
This field is solely for informational and reporting purposes and is not expected to be used for decision-making.", -} - -func (ExternalPlatformSpec) SwaggerDoc() map[string]string { - return map_ExternalPlatformSpec -} - -var map_ExternalPlatformStatus = map[string]string{ - "": "ExternalPlatformStatus holds the current status of the generic External infrastructure provider.", - "cloudControllerManager": "cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). When omitted, new nodes will not be tainted and no extra initialization from the cloud controller manager is expected.", -} - -func (ExternalPlatformStatus) SwaggerDoc() map[string]string { - return map_ExternalPlatformStatus -} - -var map_GCPPlatformSpec = map[string]string{ - "": "GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. This only includes fields that can be modified in the cluster.", -} - -func (GCPPlatformSpec) SwaggerDoc() map[string]string { - return map_GCPPlatformSpec -} - -var map_GCPPlatformStatus = map[string]string{ - "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.", - "projectID": "projectID is the Project ID for new GCP resources created for the cluster.", - "region": "region holds the region for new GCP resources created for the cluster.", - "resourceLabels": "resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration.", - "resourceTags": "resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.", - "cloudLoadBalancerConfig": "cloudLoadBalancerConfig is a union that contains the IP addresses of API, API-Int and Ingress Load Balancers created on the cloud platform. These values would not be populated on on-prem platforms. These Load Balancer IPs are used to configure the in-cluster DNS instances for API, API-Int and Ingress services. `dnsType` is expected to be set to `ClusterHosted` when these Load Balancer IP addresses are populated and used.", -} - -func (GCPPlatformStatus) SwaggerDoc() map[string]string { - return map_GCPPlatformStatus -} - -var map_GCPResourceLabel = map[string]string{ - "": "GCPResourceLabel is a label to apply to GCP resources created for the cluster.", - "key": "key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` and `openshift-io`.", - "value": "value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty.
Value must contain only lowercase letters, numeric characters, and the following special characters `_-`.", -} - -func (GCPResourceLabel) SwaggerDoc() map[string]string { - return map_GCPResourceLabel -} - -var map_GCPResourceTag = map[string]string{ - "": "GCPResourceTag is a tag to apply to GCP resources created for the cluster.", - "parentID": "parentID is the ID of the hierarchical resource where the tags are defined, e.g. at the Organization or the Project level. To find the Organization or Project ID refer to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. An OrganizationID must consist of decimal numbers, and cannot have leading zeroes. A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, and hyphens, and must start with a letter, and cannot end with a hyphen.", - "key": "key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `._-`.", - "value": "value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces.", -} - -func (GCPResourceTag) SwaggerDoc() map[string]string { - return map_GCPResourceTag -} - -var map_IBMCloudPlatformSpec = map[string]string{ - "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.", -} - -func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string { - return map_IBMCloudPlatformSpec -} - -var map_IBMCloudPlatformStatus = map[string]string{ - "": "IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.", - "location": "Location is where the cluster has been deployed", - "resourceGroupName": "ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", - "providerType": "ProviderType indicates the type of cluster that was created", - "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", - "dnsInstanceCRN": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", - "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM Cloud service. These endpoints are consumed by components within the cluster to reach the respective IBM Cloud Services.", -} - -func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { - return map_IBMCloudPlatformStatus -} - -var map_IBMCloudServiceEndpoint = map[string]string{ - "": "IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services.", - "name": "name is the name of the IBM Cloud service. Possible values are: CIS, COS, COSConfig, DNSServices, GlobalCatalog, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. 
For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com`, whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`.", - "url": "url is a fully qualified URI with scheme https that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", -} - -func (IBMCloudServiceEndpoint) SwaggerDoc() map[string]string { - return map_IBMCloudServiceEndpoint -} - -var map_Infrastructure = map[string]string{ - "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", -} - -func (Infrastructure) SwaggerDoc() map[string]string { - return map_Infrastructure -} - -var map_InfrastructureList = map[string]string{ - "": "InfrastructureList is a list of Infrastructure resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (InfrastructureList) SwaggerDoc() map[string]string { - return map_InfrastructureList -} - -var map_InfrastructureSpec = map[string]string{ - "": "InfrastructureSpec contains settings that apply to the cluster infrastructure.", - "cloudConfig": "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config.\n\ncloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in the `openshift-config-managed` namespace, with the kube cloud config stored in the `cloud.conf` key. All the clients are expected to use the generated ConfigMap only.", - "platformSpec": "platformSpec holds desired information specific to the underlying infrastructure provider.", -} - -func (InfrastructureSpec) SwaggerDoc() map[string]string { - return map_InfrastructureSpec -} - -var map_InfrastructureStatus = map[string]string{ - "": "InfrastructureStatus describes the infrastructure the cluster is leveraging.", - "infrastructureName": "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed.
Must be of max length 27 and must have only alphanumeric or hyphen characters.", - "platform": "platform is the underlying infrastructure provider for the cluster.\n\nDeprecated: Use platformStatus.type instead.", - "platformStatus": "platformStatus holds status information specific to the underlying infrastructure provider.", - "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.", - "apiServerURL": "apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.", - "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.", - "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster.", - "infrastructureTopology": "infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. NOTE: External topology mode is not applicable for this field.", - "cpuPartitioning": "cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. Valid values are \"None\" and \"AllNodes\". When omitted, the default value is \"None\". The default value of \"None\" indicates that no nodes will be setup with CPU partitioning. The \"AllNodes\" value indicates that all nodes have been setup with CPU partitioning, and can then be further configured via the PerformanceProfile API.", -} - -func (InfrastructureStatus) SwaggerDoc() map[string]string { - return map_InfrastructureStatus -} - -var map_KubevirtPlatformSpec = map[string]string{ - "": "KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider.
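A consumer of controlPlaneTopology/infrastructureTopology might use the field exactly as described above: 'SingleReplica' means the operand should not be configured for highly-available operation. A hypothetical Go sketch (replicasFor is not a real API):

```go
package main

import "fmt"

// replicasFor shows how an operator might consume infrastructureTopology:
// 'SingleReplica' deployments should not be configured for HA, while
// 'HighlyAvailable' is the documented default behavior.
func replicasFor(topology string) int32 {
	if topology == "SingleReplica" {
		return 1
	}
	// Fall back to the highly-available layout for "HighlyAvailable"
	// or any value this component does not recognize.
	return 2
}

func main() {
	fmt.Println(replicasFor("SingleReplica"))   // 1
	fmt.Println(replicasFor("HighlyAvailable")) // 2
}
```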
This only includes fields that can be modified in the cluster.", -} - -func (KubevirtPlatformSpec) SwaggerDoc() map[string]string { - return map_KubevirtPlatformSpec -} - -var map_KubevirtPlatformStatus = map[string]string{ - "": "KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider.", - "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", - "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", -} - -func (KubevirtPlatformStatus) SwaggerDoc() map[string]string { - return map_KubevirtPlatformStatus -} - -var map_NutanixFailureDomain = map[string]string{ - "": "NutanixFailureDomain configures failure domain information for the Nutanix platform.", - "name": "name defines the unique name of a failure domain. Name is required and must be at most 64 characters in length. It must consist of only lower case alphanumeric characters and hyphens (-). It must start and end with an alphanumeric character. This value is arbitrary and is used to identify the failure domain within the platform.", - "cluster": "cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", - "subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", -} - -func (NutanixFailureDomain) SwaggerDoc() map[string]string { - return map_NutanixFailureDomain -} - -var map_NutanixPlatformLoadBalancer = map[string]string{ - "": "NutanixPlatformLoadBalancer defines the load balancer used by the cluster on Nutanix platform.", - "type": "type defines the type of load balancer used by the cluster on Nutanix platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", -} - -func (NutanixPlatformLoadBalancer) SwaggerDoc() map[string]string { - return map_NutanixPlatformLoadBalancer -} - -var map_NutanixPlatformSpec = map[string]string{ - "": "NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider. This only includes fields that can be modified in the cluster.", - "prismCentral": "prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. 
Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.", - "prismElements": "prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central.", - "failureDomains": "failureDomains configures failure domains information for the Nutanix platform. When set, the failure domains defined here may be used to spread Machines across prism element clusters to improve fault tolerance of the cluster.", -} - -func (NutanixPlatformSpec) SwaggerDoc() map[string]string { - return map_NutanixPlatformSpec -} - -var map_NutanixPlatformStatus = map[string]string{ - "": "NutanixPlatformStatus holds the current status of the Nutanix infrastructure provider.", - "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.", - "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.", - "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", - "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", - "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", -} - -func (NutanixPlatformStatus) SwaggerDoc() map[string]string { - return map_NutanixPlatformStatus -} - -var map_NutanixPrismElementEndpoint = map[string]string{ - "": "NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster)", - "name": "name is the name of the Prism Element (cluster). This value will correspond with the cluster field configured on other resources (eg Machines, PVCs, etc).", - "endpoint": "endpoint holds the endpoint address and port data of the Prism Element (cluster). When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. 
Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.", -} - -func (NutanixPrismElementEndpoint) SwaggerDoc() map[string]string { - return map_NutanixPrismElementEndpoint -} - -var map_NutanixPrismEndpoint = map[string]string{ - "": "NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster)", - "address": "address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster)", - "port": "port is the port number to access the Nutanix Prism Central or Element (cluster)", -} - -func (NutanixPrismEndpoint) SwaggerDoc() map[string]string { - return map_NutanixPrismEndpoint -} - -var map_NutanixResourceIdentifier = map[string]string{ - "": "NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)", - "type": "type is the identifier type to use for this resource.", - "uuid": "uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID.", - "name": "name is the resource name in the PC. It cannot be empty if the type is Name.", -} - -func (NutanixResourceIdentifier) SwaggerDoc() map[string]string { - return map_NutanixResourceIdentifier -} - -var map_OpenStackPlatformLoadBalancer = map[string]string{ - "": "OpenStackPlatformLoadBalancer defines the load balancer used by the cluster on OpenStack platform.", - "type": "type defines the type of load balancer used by the cluster on OpenStack platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", -} - -func (OpenStackPlatformLoadBalancer) SwaggerDoc() map[string]string { - return map_OpenStackPlatformLoadBalancer -} - -var map_OpenStackPlatformSpec = map[string]string{ - "": "OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. This only includes fields that can be modified in the cluster.", - "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can).", - "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. 
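The single/dual stack wording used by apiServerInternalIPs and ingressIPs (one address for single stack, or two addresses from different IP families for dual stack) is concrete enough to check mechanically. A hedged Go sketch, not the vendored validation:

```go
package main

import (
	"fmt"
	"net"
)

// validateVIPs checks the documented shape: one IP for single stack, or
// two IPs from different IP families for dual stack.
func validateVIPs(ips []string) error {
	switch len(ips) {
	case 1:
		if net.ParseIP(ips[0]) == nil {
			return fmt.Errorf("invalid IP %q", ips[0])
		}
		return nil
	case 2:
		a, b := net.ParseIP(ips[0]), net.ParseIP(ips[1])
		if a == nil || b == nil {
			return fmt.Errorf("invalid IP in %v", ips)
		}
		// a.To4() is nil for IPv6; both nil or both non-nil means same family.
		if (a.To4() == nil) == (b.To4() == nil) {
			return fmt.Errorf("dual stack entries must come from different IP families: %v", ips)
		}
		return nil
	default:
		return fmt.Errorf("expected one or two IPs, got %d", len(ips))
	}
}

func main() {
	fmt.Println(validateVIPs([]string{"10.0.0.5", "fd00::5"}))  // <nil>
	fmt.Println(validateVIPs([]string{"10.0.0.5", "10.0.0.6"})) // same-family error
}
```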
Once set, the list cannot be completely removed (but its second entry can).", - "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example \"10.0.0.0/8\" or \"fd00::/8\".", -} - -func (OpenStackPlatformSpec) SwaggerDoc() map[string]string { - return map_OpenStackPlatformSpec -} - -var map_OpenStackPlatformStatus = map[string]string{ - "": "OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.", - "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.", - "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.", - "cloudName": "cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`).", - "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", - "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", - "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", - "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", - "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.", -} - -func (OpenStackPlatformStatus) SwaggerDoc() map[string]string { - return map_OpenStackPlatformStatus -} - -var map_OvirtPlatformLoadBalancer = map[string]string{ - "": "OvirtPlatformLoadBalancer defines the load balancer used by the cluster on Ovirt platform.", - "type": "type defines the type of load balancer used by the cluster on Ovirt platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. 
-
-var map_OvirtPlatformLoadBalancer = map[string]string{
-	"": "OvirtPlatformLoadBalancer defines the load balancer used by the cluster on Ovirt platform.",
-	"type": "type defines the type of load balancer used by the cluster on Ovirt platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.",
-}
-
-func (OvirtPlatformLoadBalancer) SwaggerDoc() map[string]string {
-	return map_OvirtPlatformLoadBalancer
-}
-
-var map_OvirtPlatformSpec = map[string]string{
-	"": "OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. This only includes fields that can be modified in the cluster.",
-}
-
-func (OvirtPlatformSpec) SwaggerDoc() map[string]string {
-	return map_OvirtPlatformSpec
-}
-
-var map_OvirtPlatformStatus = map[string]string{
-	"": "OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.",
-	"apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.",
-	"apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.",
-	"ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.",
-	"ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.",
-	"nodeDNSIP": "deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.",
-	"loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.",
-}
-
-func (OvirtPlatformStatus) SwaggerDoc() map[string]string {
-	return map_OvirtPlatformStatus
-}
-
-var map_PlatformSpec = map[string]string{
-	"": "PlatformSpec holds the desired state specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.",
-	"type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
-	"aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.",
-	"azure": "Azure contains settings specific to the Azure infrastructure provider.",
-	"gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.",
-	"baremetal": "BareMetal contains settings specific to the BareMetal platform.",
-	"openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.",
-	"ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.",
-	"vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.",
-	"ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.",
-	"kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.",
-	"equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.",
-	"powervs": "PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.",
-	"alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.",
-	"nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.",
-	"external": "ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately.",
-}
-
-func (PlatformSpec) SwaggerDoc() map[string]string {
-	return map_PlatformSpec
-}
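The type doc above carries a compatibility rule that is easy to overlook: a component that does not recognize a platform value must behave as if the platform were None. A sketch of that guard; the constants mirror a subset of the allowed values listed in the doc, and the `effectivePlatform` helper is invented for illustration:

```go
package main

import "fmt"

type PlatformType string

// A subset of the allowed values listed in the type documentation.
const (
	AWSPlatformType   PlatformType = "AWS"
	AzurePlatformType PlatformType = "Azure"
	NonePlatformType  PlatformType = "None"
)

// effectivePlatform implements the documented rule: a component with no
// specific support for a platform must treat that platform as None.
func effectivePlatform(t PlatformType, supported map[PlatformType]bool) PlatformType {
	if supported[t] {
		return t
	}
	return NonePlatformType
}

func main() {
	supported := map[PlatformType]bool{AWSPlatformType: true, AzurePlatformType: true}
	fmt.Println(effectivePlatform(AWSPlatformType, supported))          // AWS
	fmt.Println(effectivePlatform(PlatformType("Nutanix"), supported)) // None
}
```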
-
-var map_PlatformStatus = map[string]string{
-	"": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.",
-	"type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.\n\nThis value will be synced to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set.",
-	"aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.",
-	"azure": "Azure contains settings specific to the Azure infrastructure provider.",
-	"gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.",
-	"baremetal": "BareMetal contains settings specific to the BareMetal platform.",
-	"openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.",
-	"ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.",
-	"vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.",
-	"ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.",
-	"kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.",
-	"equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.",
-	"powervs": "PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider.",
-	"alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.",
-	"nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.",
-	"external": "External contains settings specific to the generic External infrastructure provider.",
-}
-
-func (PlatformStatus) SwaggerDoc() map[string]string {
-	return map_PlatformStatus
-}
-
-var map_PowerVSPlatformSpec = map[string]string{
-	"": "PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider. This only includes fields that can be modified in the cluster.",
-	"serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.",
-}
-
-func (PowerVSPlatformSpec) SwaggerDoc() map[string]string {
-	return map_PowerVSPlatformSpec
-}
-
-var map_PowerVSPlatformStatus = map[string]string{
-	"": "PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider.",
-	"region": "region holds the default Power VS region for new Power VS resources created by the cluster.",
-	"zone": "zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported",
-	"resourceGroup": "resourceGroup is the resource group name for new IBMCloud resources created for a cluster. The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. When omitted, the image registry operator won't be able to configure storage, which results in the image registry cluster operator not being in an available state.",
-	"serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.",
-	"cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain",
-	"dnsInstanceCRN": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain",
-}
-
-func (PowerVSPlatformStatus) SwaggerDoc() map[string]string {
-	return map_PowerVSPlatformStatus
-}
-
-var map_PowerVSServiceEndpoint = map[string]string{
-	"": "PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services.",
-	"name": "name is the name of the Power VS service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud",
-	"url": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty.",
-}
-
-func (PowerVSServiceEndpoint) SwaggerDoc() map[string]string {
-	return map_PowerVSServiceEndpoint
-}
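The serviceEndpoints contract above is compact but strict: the url is required, must be fully qualified, and must use the https scheme. A sketch of a validator for one override entry, using a standalone struct rather than the vendored type; the endpoint names and URL in `main` are examples, not defaults:

```go
package main

import (
	"fmt"
	"net/url"
)

// powerVSServiceEndpoint mirrors the documented name/url pair.
type powerVSServiceEndpoint struct {
	Name string // e.g. "IAM", "ResourceController", "Power Cloud"
	URL  string // must be a fully qualified https URI
}

// validate enforces the documented rules: url is required and must use https.
func (e powerVSServiceEndpoint) validate() error {
	if e.URL == "" {
		return fmt.Errorf("endpoint %q: url must be provided and cannot be empty", e.Name)
	}
	u, err := url.Parse(e.URL)
	if err != nil {
		return fmt.Errorf("endpoint %q: %w", e.Name, err)
	}
	if u.Scheme != "https" {
		return fmt.Errorf("endpoint %q: scheme must be https, got %q", e.Name, u.Scheme)
	}
	return nil
}

func main() {
	ok := powerVSServiceEndpoint{Name: "IAM", URL: "https://iam.cloud.ibm.com"}
	bad := powerVSServiceEndpoint{Name: "Power Cloud", URL: "http://example.com"}
	fmt.Println(ok.validate())  // <nil>
	fmt.Println(bad.validate()) // scheme error
}
```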
-
-var map_VSpherePlatformFailureDomainSpec = map[string]string{
-	"": "VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain.",
-	"name": "name defines the arbitrary but unique name of a failure domain.",
-	"region": "region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region.",
-	"zone": "zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone.",
-	"server": "server is the fully-qualified domain name or the IP address of the vCenter server.",
-	"topology": "Topology describes a given failure domain using vSphere constructs",
-}
-
-func (VSpherePlatformFailureDomainSpec) SwaggerDoc() map[string]string {
-	return map_VSpherePlatformFailureDomainSpec
-}
-
-var map_VSpherePlatformLoadBalancer = map[string]string{
-	"": "VSpherePlatformLoadBalancer defines the load balancer used by the cluster on VSphere platform.",
-	"type": "type defines the type of load balancer used by the cluster on VSphere platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.",
-}
-
-func (VSpherePlatformLoadBalancer) SwaggerDoc() map[string]string {
-	return map_VSpherePlatformLoadBalancer
-}
-
-var map_VSpherePlatformNodeNetworking = map[string]string{
-	"": "VSpherePlatformNodeNetworking holds the external and internal node networking spec.",
-	"external": "external represents the network configuration of the node that is externally routable.",
-	"internal": "internal represents the network configuration of the node that is routable only within the cluster.",
-}
-
-func (VSpherePlatformNodeNetworking) SwaggerDoc() map[string]string {
-	return map_VSpherePlatformNodeNetworking
-}
-
-var map_VSpherePlatformNodeNetworkingSpec = map[string]string{
-	"": "VSpherePlatformNodeNetworkingSpec holds the network CIDR(s) and port group name for including and excluding IP ranges in the cloud provider. This would be used for example when multiple network adapters are attached to a guest to help determine which IP address the cloud config manager should use for the external and internal node networking.",
-	"networkSubnetCidr": "networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs that will be used in respective status.addresses fields.",
-	"network": "network VirtualMachine's VM Network names that will be used to when searching for status.addresses fields. Note that if internal.networkSubnetCIDR and external.networkSubnetCIDR are not set, then the vNIC associated to this network must only have a single IP address assigned to it. The available networks (port groups) can be listed using `govc ls 'network/*'`",
-	"excludeNetworkSubnetCidr": "excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting the IP address from the VirtualMachine's VM for use in the status.addresses fields.",
-}
-
-func (VSpherePlatformNodeNetworkingSpec) SwaggerDoc() map[string]string {
-	return map_VSpherePlatformNodeNetworkingSpec
-}
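The networkSubnetCidr/excludeNetworkSubnetCidr semantics above reduce to a filter over a VM's addresses: pick an address inside an include range and outside every exclude range. A sketch of that selection under that assumption (this is not the cloud config manager's actual code):

```go
package main

import (
	"fmt"
	"net/netip"
)

// pickNodeAddress returns the first address that falls inside one of the
// include CIDRs and outside all exclude CIDRs, mirroring the documented
// networkSubnetCidr/excludeNetworkSubnetCidr behavior.
func pickNodeAddress(addrs, include, exclude []string) (string, bool) {
	contains := func(cidrs []string, a netip.Addr) bool {
		for _, c := range cidrs {
			if p, err := netip.ParsePrefix(c); err == nil && p.Contains(a) {
				return true
			}
		}
		return false
	}
	for _, s := range addrs {
		a, err := netip.ParseAddr(s)
		if err != nil {
			continue
		}
		if contains(include, a) && !contains(exclude, a) {
			return s, true
		}
	}
	return "", false
}

func main() {
	addrs := []string{"192.168.1.10", "10.0.0.10"}
	addr, ok := pickNodeAddress(addrs, []string{"10.0.0.0/8"}, []string{"10.0.0.0/24"})
	// "" false: 10.0.0.10 is excluded by the /24, 192.168.1.10 is not included.
	fmt.Println(addr, ok)
}
```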
-
-var map_VSpherePlatformSpec = map[string]string{
-	"": "VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. In the future the cloud provider operator, storage operator and machine operator will use these fields for configuration.",
-	"vcenters": "vcenters holds the connection details for services to communicate with vCenter. Currently, only a single vCenter is supported.",
-	"failureDomains": "failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used.",
-	"nodeNetworking": "nodeNetworking contains the definition of internal and external network constraints for assigning the node's networking. If this field is omitted, networking defaults to the legacy address selection behavior which is to only support a single address and return the first one found.",
-	"apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can).",
-	"ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can).",
-	"machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example \"10.0.0.0/8\" or \"fd00::/8\".",
-}
-
-func (VSpherePlatformSpec) SwaggerDoc() map[string]string {
-	return map_VSpherePlatformSpec
-}
-
-var map_VSpherePlatformStatus = map[string]string{
-	"": "VSpherePlatformStatus holds the current status of the vSphere infrastructure provider.",
-	"apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.",
-	"apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.",
-	"ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.",
-	"ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.",
-	"nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
-	"loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.",
-	"machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.",
-}
-
-func (VSpherePlatformStatus) SwaggerDoc() map[string]string {
-	return map_VSpherePlatformStatus
-}
-
-var map_VSpherePlatformTopology = map[string]string{
-	"": "VSpherePlatformTopology holds the required and optional vCenter objects - datacenter, computeCluster, networks, datastore and resourcePool - to provision virtual machines.",
-	"datacenter": "datacenter is the name of vCenter datacenter in which virtual machines will be located. The maximum length of the datacenter name is 80 characters.",
-	"computeCluster": "computeCluster the absolute path of the vCenter cluster in which virtual machine will be located. The absolute path is of the form /<datacenter>/host/<cluster>. The maximum length of the path is 2048 characters.",
-	"networks": "networks is the list of port group network names within this failure domain. Currently, we only support a single interface per RHCOS virtual machine. The available networks (port groups) can be listed using `govc ls 'network/*'` The single interface should be the absolute path of the form /<datacenter>/network/<portgroup>.",
-	"datastore": "datastore is the absolute path of the datastore in which the virtual machine is located. The absolute path is of the form /<datacenter>/datastore/<datastore> The maximum length of the path is 2048 characters.",
-	"resourcePool": "resourcePool is the absolute path of the resource pool where virtual machines will be created. The absolute path is of the form /<datacenter>/host/<cluster>/Resources/<resourcepool>. The maximum length of the path is 2048 characters.",
-	"folder": "folder is the absolute path of the folder where virtual machines are located. The absolute path is of the form /<datacenter>/vm/<folder>. The maximum length of the path is 2048 characters.",
-	"template": "template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters.\n\nWhen omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as /<datacenter>/vm/test-rhcos-region1-zonea.",
-}
-
-func (VSpherePlatformTopology) SwaggerDoc() map[string]string {
-	return map_VSpherePlatformTopology
-}
-
-var map_VSpherePlatformVCenterSpec = map[string]string{
-	"": "VSpherePlatformVCenterSpec stores the vCenter connection fields. This is used by the vSphere CCM.",
-	"server": "server is the fully-qualified domain name or the IP address of the vCenter server.",
-	"port": "port is the TCP port that will be used to communicate to the vCenter endpoint. When omitted, this means the user has no opinion and it is up to the platform to choose a sensible default, which is subject to change over time.",
-	"datacenters": "The vCenter Datacenters in which the RHCOS vm guests are located. This field will be used by the Cloud Controller Manager. Each datacenter listed here should be used within a topology.",
-}
-
-func (VSpherePlatformVCenterSpec) SwaggerDoc() map[string]string {
-	return map_VSpherePlatformVCenterSpec
-}
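The path forms above are plain govc-style inventory paths. A sketch of a filled-in topology using a standalone struct that mirrors the documented fields; dc1, cluster1, and the other names are hypothetical placeholders, not defaults:

```go
package main

import "fmt"

// vSphereTopology mirrors the documented VSpherePlatformTopology fields.
type vSphereTopology struct {
	Datacenter     string
	ComputeCluster string // /<datacenter>/host/<cluster>
	Networks       []string
	Datastore      string // /<datacenter>/datastore/<datastore>
	ResourcePool   string // /<datacenter>/host/<cluster>/Resources/<resourcepool>
	Folder         string // /<datacenter>/vm/<folder>
}

func main() {
	t := vSphereTopology{
		Datacenter:     "dc1",
		ComputeCluster: "/dc1/host/cluster1",
		Networks:       []string{"/dc1/network/VM Network"},
		Datastore:      "/dc1/datastore/datastore1",
		ResourcePool:   "/dc1/host/cluster1/Resources/pool1",
		Folder:         "/dc1/vm/openshift",
	}
	fmt.Printf("%+v\n", t)
}
```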
-
-var map_AWSIngressSpec = map[string]string{
-	"": "AWSIngressSpec holds the desired state of the Ingress for Amazon Web Services infrastructure provider. This only includes fields that can be modified in the cluster.",
-	"type": "type allows user to set a load balancer type. When this field is set the default ingresscontroller will get created using the specified LBType. If this field is not set then the default ingress controller of LBType Classic will be created. Valid values are:\n\n* \"Classic\": A Classic Load Balancer that makes routing decisions at either\n the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See\n the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb\n\n* \"NLB\": A Network Load Balancer that makes routing decisions at the\n transport layer (TCP/SSL). See the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb",
-}
-
-func (AWSIngressSpec) SwaggerDoc() map[string]string {
-	return map_AWSIngressSpec
-}
-
-var map_ComponentRouteSpec = map[string]string{
-	"": "ComponentRouteSpec allows for configuration of a route's hostname and serving certificate.",
-	"namespace": "namespace is the namespace of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.",
-	"name": "name is the logical name of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.",
-	"hostname": "hostname is the hostname that should be used by the route.",
-	"servingCertKeyPairSecret": "servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.",
-}
-
-func (ComponentRouteSpec) SwaggerDoc() map[string]string {
-	return map_ComponentRouteSpec
-}
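Concretely, customizing a component route means writing an entry of this shape into spec.componentRoutes, matching an entry the operator published in status. A sketch with local stand-in structs (the console route and secret name are hypothetical examples):

```go
package main

import "fmt"

// secretNameReference and componentRouteSpec mirror the documented shape of
// spec.componentRoutes entries; they are local stand-ins, not the vendored types.
type secretNameReference struct {
	Name string // secret of type kubernetes.io/tls in the openshift-config namespace
}

type componentRouteSpec struct {
	Namespace                string
	Name                     string
	Hostname                 string
	ServingCertKeyPairSecret secretNameReference
}

func main() {
	// Namespace and name must match a corresponding entry in status.componentRoutes.
	custom := componentRouteSpec{
		Namespace:                "openshift-console",
		Name:                     "console",
		Hostname:                 "console.apps.example.com",
		ServingCertKeyPairSecret: secretNameReference{Name: "console-custom-tls"},
	}
	fmt.Printf("%+v\n", custom)
}
```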
-
-var map_ComponentRouteStatus = map[string]string{
-	"": "ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate.",
-	"namespace": "namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace ensures that no two components will conflict and the same component can be installed multiple times.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.",
-	"name": "name is the logical name of the route to customize. It does not have to be the actual name of a route resource but it cannot be renamed.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.",
-	"defaultHostname": "defaultHostname is the hostname of this route prior to customization.",
-	"consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret.",
-	"currentHostnames": "currentHostnames is the list of current names used by the route. Typically, this list should consist of a single hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list.",
-	"conditions": "conditions are used to communicate the state of the componentRoutes entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf available is true, the content served by the route can be accessed by users. This includes cases where a default may continue to serve content while the customized route specified by the cluster-admin is being configured.\n\nIf Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. The currentHostnames field may or may not be in effect.\n\nIf Progressing is true, that means the component is taking some action related to the componentRoutes entry.",
-	"relatedObjects": "relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied.",
-}
-
-func (ComponentRouteStatus) SwaggerDoc() map[string]string {
-	return map_ComponentRouteStatus
-}
-
-var map_Ingress = map[string]string{
-	"": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
-	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"spec": "spec holds user settable values for configuration",
-	"status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Ingress) SwaggerDoc() map[string]string {
-	return map_Ingress
-}
-
-var map_IngressList = map[string]string{
-	"": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
-	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-}
-
-func (IngressList) SwaggerDoc() map[string]string {
-	return map_IngressList
-}
-
-var map_IngressPlatformSpec = map[string]string{
-	"": "IngressPlatformSpec holds the desired state of Ingress specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.",
-	"type": "type is the underlying infrastructure provider for the cluster. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
-	"aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.",
-}
-
-func (IngressPlatformSpec) SwaggerDoc() map[string]string {
-	return map_IngressPlatformSpec
-}
-
-var map_IngressSpec = map[string]string{
-	"domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.<domain>\".\n\nOnce set, changing domain is not currently supported.",
-	"appsDomain": "appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been set up with a wildcard certificate.",
-	"componentRoutes": "componentRoutes is an optional list of routes that are managed by OpenShift components that a cluster-admin is able to configure the hostname and serving certificate for. The namespace and name of each route in this list should match an existing entry in the status.componentRoutes list.\n\nTo determine the set of configurable Routes, look at namespace and name of entries in the .status.componentRoutes list, where participating operators write the status of configurable routes.",
-	"requiredHSTSPolicies": "requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes matching the domainPattern/s and namespaceSelector/s that are specified in the policy. Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route annotation, and affect route admission.\n\nA candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\" E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains\n\n- For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector, then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route is rejected. - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies determines the route's admission status. - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector, then it may use any HSTS Policy annotation.\n\nThe HSTS policy configuration may be changed after routes have already been created. An update to a previously admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration. However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working.\n\nNote that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid.",
-	"loadBalancer": "loadBalancer contains the load balancer details in general which are not only specific to the underlying infrastructure provider of the current cluster and are required for Ingress Controller to work on OpenShift.",
-}
-
-func (IngressSpec) SwaggerDoc() map[string]string {
-	return map_IngressSpec
-}
-
-var map_IngressStatus = map[string]string{
-	"componentRoutes": "componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin.",
-	"defaultPlacement": "defaultPlacement is set at installation time to control which nodes will host the ingress router pods by default. The options are control-plane nodes or worker nodes.\n\nThis field works by dictating how the Cluster Ingress Operator will consider unset replicas and nodePlacement fields in IngressController resources when creating the corresponding Deployments.\n\nSee the documentation for the IngressController replicas and nodePlacement fields for more information.\n\nWhen omitted, the default value is Workers",
-}
-
-func (IngressStatus) SwaggerDoc() map[string]string {
-	return map_IngressStatus
-}
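The requiredHSTSPolicies text above keys everything off a single route annotation, `haproxy.router.openshift.io/hsts_header`. A sketch of checking such an annotation against a minimal policy (max-age only); this is a deliberately simplified parser for illustration, not the router's actual admission logic:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

const hstsAnnotation = "haproxy.router.openshift.io/hsts_header"

// maxAgeOf extracts the max-age directive from an HSTS header value such as
// "max-age=31536000;preload;includeSubDomains".
func maxAgeOf(header string) (int, bool) {
	for _, part := range strings.Split(header, ";") {
		part = strings.TrimSpace(part)
		if v, ok := strings.CutPrefix(part, "max-age="); ok {
			age, err := strconv.Atoi(v)
			return age, err == nil
		}
	}
	return 0, false
}

func main() {
	annotations := map[string]string{
		hstsAnnotation: "max-age=31536000;preload;includeSubDomains",
	}
	if age, ok := maxAgeOf(annotations[hstsAnnotation]); ok {
		fmt.Println("route advertises HSTS max-age:", age)
	}
}
```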
-
-var map_LoadBalancer = map[string]string{
-	"platform": "platform holds configuration specific to the underlying infrastructure provider for the ingress load balancers. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time.",
-}
-
-func (LoadBalancer) SwaggerDoc() map[string]string {
-	return map_LoadBalancer
-}
-
-var map_ClusterNetworkEntry = map[string]string{
-	"": "ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated.",
-	"cidr": "The complete block for pod IPs.",
-	"hostPrefix": "The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset.",
-}
-
-func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
-	return map_ClusterNetworkEntry
-}
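The cidr/hostPrefix pair above fixes how many per-node pod subnets a cluster network can hand out: a /14 pod network carved into /23 node blocks yields 2^(23-14) = 512 node subnets. A quick check of that arithmetic (the example CIDR is a common OpenShift default, used here only as an illustration):

```go
package main

import (
	"fmt"
	"net/netip"
)

// nodeSubnets returns how many hostPrefix-sized blocks fit into the cluster
// network CIDR: 2^(hostPrefix - prefixLen).
func nodeSubnets(cidr string, hostPrefix int) (int, error) {
	p, err := netip.ParsePrefix(cidr)
	if err != nil {
		return 0, err
	}
	if hostPrefix < p.Bits() {
		return 0, fmt.Errorf("hostPrefix /%d is larger than the network /%d", hostPrefix, p.Bits())
	}
	return 1 << (hostPrefix - p.Bits()), nil
}

func main() {
	n, _ := nodeSubnets("10.128.0.0/14", 23)
	fmt.Println(n) // 512
}
```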
-
-var map_ExternalIPConfig = map[string]string{
-	"": "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.",
-	"policy": "policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set.",
-	"autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.",
-}
-
-func (ExternalIPConfig) SwaggerDoc() map[string]string {
-	return map_ExternalIPConfig
-}
-
-var map_ExternalIPPolicy = map[string]string{
-	"": "ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP field in a Service. If the zero struct is supplied, then none are permitted. The policy controller always allows automatically assigned external IPs.",
-	"allowedCIDRs": "allowedCIDRs is the list of allowed CIDRs.",
-	"rejectedCIDRs": "rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs.",
-}
-
-func (ExternalIPPolicy) SwaggerDoc() map[string]string {
-	return map_ExternalIPPolicy
-}
-
-var map_MTUMigration = map[string]string{
-	"": "MTUMigration contains information about MTU migration.",
-	"network": "Network contains MTU migration configuration for the default network.",
-	"machine": "Machine contains MTU migration configuration for the machine's uplink.",
-}
-
-func (MTUMigration) SwaggerDoc() map[string]string {
-	return map_MTUMigration
-}
-
-var map_MTUMigrationValues = map[string]string{
-	"": "MTUMigrationValues contains the values for a MTU migration.",
-	"to": "To is the MTU to migrate to.",
-	"from": "From is the MTU to migrate from.",
-}
-
-func (MTUMigrationValues) SwaggerDoc() map[string]string {
-	return map_MTUMigrationValues
-}
-
-var map_Network = map[string]string{
-	"": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
-	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
-	"status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Network) SwaggerDoc() map[string]string {
-	return map_Network
-}
-
-var map_NetworkDiagnostics = map[string]string{
-	"mode": "mode controls the network diagnostics mode\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is All.",
-	"sourcePlacement": "sourcePlacement controls the scheduling of network diagnostics source deployment\n\nSee NetworkDiagnosticsSourcePlacement for more details about default values.",
-	"targetPlacement": "targetPlacement controls the scheduling of network diagnostics target daemonset\n\nSee NetworkDiagnosticsTargetPlacement for more details about default values.",
-}
-
-func (NetworkDiagnostics) SwaggerDoc() map[string]string {
-	return map_NetworkDiagnostics
-}
-
-var map_NetworkDiagnosticsSourcePlacement = map[string]string{
-	"": "NetworkDiagnosticsSourcePlacement defines node scheduling configuration network diagnostics source components",
-	"nodeSelector": "nodeSelector is the node selector applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `kubernetes.io/os: linux`.",
-	"tolerations": "tolerations is a list of tolerations applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is an empty list.",
-}
-
-func (NetworkDiagnosticsSourcePlacement) SwaggerDoc() map[string]string {
-	return map_NetworkDiagnosticsSourcePlacement
-}
-
-var map_NetworkDiagnosticsTargetPlacement = map[string]string{
-	"": "NetworkDiagnosticsTargetPlacement defines node scheduling configuration network diagnostics target components",
-	"nodeSelector": "nodeSelector is the node selector applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `kubernetes.io/os: linux`.",
-	"tolerations": "tolerations is a list of tolerations applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `- operator: \"Exists\"` which means that all taints are tolerated.",
-}
-
-func (NetworkDiagnosticsTargetPlacement) SwaggerDoc() map[string]string {
-	return map_NetworkDiagnosticsTargetPlacement
-}
-
-var map_NetworkList = map[string]string{
-	"": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
-	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-}
-
-func (NetworkList) SwaggerDoc() map[string]string {
-	return map_NetworkList
-}
-
-var map_NetworkMigration = map[string]string{
-	"": "NetworkMigration represents the cluster network configuration.",
-	"networkType": "NetworkType is the target plugin that is to be deployed. Currently supported values are: OpenShiftSDN, OVNKubernetes",
-	"mtu": "MTU contains the MTU migration configuration.",
-}
-
-func (NetworkMigration) SwaggerDoc() map[string]string {
-	return map_NetworkMigration
-}
-
-var map_NetworkSpec = map[string]string{
-	"": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
-	"clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.",
-	"serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.",
-	"networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.",
-	"externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.",
-	"serviceNodePortRange": "The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed.",
-	"networkDiagnostics": "networkDiagnostics defines network diagnostics configuration.\n\nTakes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. If networkDiagnostics is not specified or is empty, and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, the network diagnostics feature will be disabled.",
-}
-
-func (NetworkSpec) SwaggerDoc() map[string]string {
-	return map_NetworkSpec
-}
-
-var map_NetworkStatus = map[string]string{
-	"": "NetworkStatus is the current network configuration.",
-	"clusterNetwork": "IP address pool to use for pod IPs.",
-	"serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.",
-	"networkType": "NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).",
-	"clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.",
-	"migration": "Migration contains the cluster network migration configuration.",
-	"conditions": "conditions represents the observations of a network.config current state. Known .status.conditions.type are: \"NetworkTypeMigrationInProgress\", \"NetworkTypeMigrationMTUReady\", \"NetworkTypeMigrationTargetCNIAvailable\", \"NetworkTypeMigrationTargetCNIInUse\", \"NetworkTypeMigrationOriginalCNIPurged\" and \"NetworkDiagnosticsAvailable\"",
-}
-
-func (NetworkStatus) SwaggerDoc() map[string]string {
-	return map_NetworkStatus
-}
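Putting the NetworkSpec fields above together, a typical install-time configuration looks roughly like the following. These are local stand-in structs with hypothetical values; the real object is the cluster-scoped network.config resource named `cluster`:

```go
package main

import "fmt"

// Local mirrors of the documented NetworkSpec shape.
type clusterNetworkEntry struct {
	CIDR       string
	HostPrefix uint32
}

type networkSpec struct {
	ClusterNetwork       []clusterNetworkEntry
	ServiceNetwork       []string
	NetworkType          string
	ServiceNodePortRange string
}

func main() {
	spec := networkSpec{
		// Immutable after installation, per the field docs above.
		ClusterNetwork: []clusterNetworkEntry{{CIDR: "10.128.0.0/14", HostPrefix: 23}},
		ServiceNetwork: []string{"172.30.0.0/16"},
		NetworkType:    "OVNKubernetes",
		// May be updated post-install; defaults to 30000-32767 when unset.
		ServiceNodePortRange: "30000-32767",
	}
	fmt.Printf("%+v\n", spec)
}
```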
-
-var map_Node = map[string]string{
-	"": "Node holds cluster-wide information about node specific features.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
-	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"spec": "spec holds user settable values for configuration",
-	"status": "status holds observed values.",
-}
-
-func (Node) SwaggerDoc() map[string]string {
-	return map_Node
-}
-
-var map_NodeList = map[string]string{
-	"": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
-	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-}
-
-func (NodeList) SwaggerDoc() map[string]string {
-	return map_NodeList
-}
-
-var map_NodeSpec = map[string]string{
-	"cgroupMode": "CgroupMode determines the cgroups version on the node",
-	"workerLatencyProfile": "WorkerLatencyProfile determines how fast the kubelet is updating the status and corresponding reaction of the cluster",
-}
-
-func (NodeSpec) SwaggerDoc() map[string]string {
-	return map_NodeSpec
-}
-
-var map_BasicAuthIdentityProvider = map[string]string{
-	"": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials",
-}
-
-func (BasicAuthIdentityProvider) SwaggerDoc() map[string]string {
-	return map_BasicAuthIdentityProvider
-}
-
-var map_GitHubIdentityProvider = map[string]string{
-	"": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials",
-	"clientID": "clientID is the oauth client ID",
-	"clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
-	"organizations": "organizations optionally restricts which organizations are allowed to log in",
-	"teams": "teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.",
-	"hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.",
-	"ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config.",
-}
-
-func (GitHubIdentityProvider) SwaggerDoc() map[string]string {
-	return map_GitHubIdentityProvider
-}
-
-var map_GitLabIdentityProvider = map[string]string{
-	"": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials",
-	"clientID": "clientID is the oauth client ID",
-	"clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
-	"url": "url is the oauth server base URL",
-	"ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
-}
-
-func (GitLabIdentityProvider) SwaggerDoc() map[string]string {
-	return map_GitLabIdentityProvider
-}
-
-var map_GoogleIdentityProvider = map[string]string{
-	"": "GoogleIdentityProvider provides identities for users authenticating using Google credentials",
-	"clientID": "clientID is the oauth client ID",
-	"clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
-	"hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to",
-}
-
-func (GoogleIdentityProvider) SwaggerDoc() map[string]string {
-	return map_GoogleIdentityProvider
-}
-
-var map_HTPasswdIdentityProvider = map[string]string{
-	"": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials",
-	"fileData": "fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key \"htpasswd\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
-}
-
-func (HTPasswdIdentityProvider) SwaggerDoc() map[string]string {
-	return map_HTPasswdIdentityProvider
-}
-
-var map_IdentityProvider = map[string]string{
-	"": "IdentityProvider provides identities for users authenticating using credentials",
-	"name": "name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal \".\" or \"..\" or contain \"/\" or \"%\" or \":\"\n Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName",
-	"mappingMethod": "mappingMethod determines how identities from this provider are mapped to users Defaults to \"claim\"",
-}
-
-func (IdentityProvider) SwaggerDoc() map[string]string {
-	return map_IdentityProvider
-}
-
-var map_IdentityProviderConfig = map[string]string{
-	"": "IdentityProviderConfig contains configuration for using a specific identity provider",
-	"type": "type identifies the identity provider type for this entry.",
-	"basicAuth": "basicAuth contains configuration options for the BasicAuth IdP",
-	"github": "github enables user authentication using GitHub credentials",
-	"gitlab": "gitlab enables user authentication using GitLab credentials",
-	"google": "google enables user authentication using Google credentials",
-	"htpasswd": "htpasswd enables user authentication using an HTPasswd file to validate credentials",
-	"keystone": "keystone enables user authentication using keystone password credentials",
-	"ldap": "ldap enables user authentication using LDAP credentials",
-	"openID": "openID enables user authentication using OpenID credentials",
-	"requestHeader": "requestHeader enables user authentication using request header credentials",
-}
-
-func (IdentityProviderConfig) SwaggerDoc() map[string]string {
-	return map_IdentityProviderConfig
-}
-
-var map_KeystoneIdentityProvider = map[string]string{
-	"": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials",
-	"domainName": "domainName is required for keystone v3",
-}
-
-func (KeystoneIdentityProvider) SwaggerDoc() map[string]string {
-	return map_KeystoneIdentityProvider
-}
-
-var map_LDAPAttributeMapping = map[string]string{
-	"": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields",
-	"id": "id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attribute have a value, authentication fails. LDAP standard identity attribute is \"dn\"",
-	"preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"",
-	"name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"",
-	"email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
-}
-
-func (LDAPAttributeMapping) SwaggerDoc() map[string]string {
-	return map_LDAPAttributeMapping
-}
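The fallback semantics above ("first non-empty attribute is used") are worth pinning down. A sketch of that resolution over an LDAP entry, using the standard attributes named in the docs as defaults; the entry itself is hypothetical:

```go
package main

import "fmt"

// firstNonEmpty implements the documented lookup: scan the configured
// attribute names in order and take the first one with a value. Identity
// resolution fails only when the required id list yields nothing.
func firstNonEmpty(entry map[string]string, attrs []string) (string, bool) {
	for _, a := range attrs {
		if v := entry[a]; v != "" {
			return v, true
		}
	}
	return "", false
}

func main() {
	// Hypothetical LDAP entry.
	entry := map[string]string{
		"dn":  "uid=jdoe,ou=users,dc=example,dc=com",
		"uid": "jdoe",
		"cn":  "Jane Doe",
	}

	id, ok := firstNonEmpty(entry, []string{"dn"}) // LDAP standard identity attribute
	if !ok {
		panic("authentication fails: no id attribute had a value")
	}
	username, _ := firstNonEmpty(entry, []string{"uid"})
	name, _ := firstNonEmpty(entry, []string{"cn"})
	fmt.Println(id, username, name)
}
```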
The key \"bindPassword\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", - "insecure": "insecure, if true, indicates the connection should not use TLS WARNING: Should not be set to `true` with the URL scheme \"ldaps://\" as \"ldaps://\" URLs always\n attempt to connect using TLS, even when `insecure` is set to `true`\nWhen `true`, \"ldap://\" URLS connect insecurely. When `false`, \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.", - "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", - "attributes": "attributes maps LDAP attributes to identities", -} - -func (LDAPIdentityProvider) SwaggerDoc() map[string]string { - return map_LDAPIdentityProvider -} - -var map_OAuth = map[string]string{ - "": "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", -} - -func (OAuth) SwaggerDoc() map[string]string { - return map_OAuth -} - -var map_OAuthList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (OAuthList) SwaggerDoc() map[string]string { - return map_OAuthList -} - -var map_OAuthRemoteConnectionInfo = map[string]string{ - "": "OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection", - "url": "url is the remote URL to connect to", - "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", - "tlsClientCert": "tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key \"tls.crt\" is used to locate the data. 
-
-var map_OAuthRemoteConnectionInfo = map[string]string{
-	"": "OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection",
-	"url": "url is the remote URL to connect to",
-	"ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
-	"tlsClientCert": "tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key \"tls.crt\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
-	"tlsClientKey": "tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key \"tls.key\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
-}
-
-func (OAuthRemoteConnectionInfo) SwaggerDoc() map[string]string {
-	return map_OAuthRemoteConnectionInfo
-}
-
-var map_OAuthSpec = map[string]string{
-	"": "OAuthSpec contains desired cluster auth configuration",
-	"identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users.",
-	"tokenConfig": "tokenConfig contains options for authorization and access tokens",
-	"templates": "templates allow you to customize pages like the login page.",
-}
-
-func (OAuthSpec) SwaggerDoc() map[string]string {
-	return map_OAuthSpec
-}
-
-var map_OAuthStatus = map[string]string{
-	"": "OAuthStatus shows current known state of OAuth server in the cluster",
-}
-
-func (OAuthStatus) SwaggerDoc() map[string]string {
-	return map_OAuthStatus
-}
-
-var map_OAuthTemplates = map[string]string{
-	"": "OAuthTemplates allow for customization of pages like the login page",
-	"login": "login is the name of a secret that specifies a go template to use to render the login page. The key \"login.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config.",
-	"providerSelection": "providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key \"providers.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config.",
-	"error": "error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key \"errors.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config.",
-}
-
-func (OAuthTemplates) SwaggerDoc() map[string]string {
-	return map_OAuthTemplates
-}
If unspecified, the preferred username is determined from the value of the sub claim", - "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", - "email": "email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", - "groups": "groups is the list of claims value of which should be used to synchronize groups from the OIDC provider to OpenShift for the user. If multiple claims are specified, the first one with a non-empty value is used.", -} - -func (OpenIDClaims) SwaggerDoc() map[string]string { - return map_OpenIDClaims -} - -var map_OpenIDIdentityProvider = map[string]string{ - "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials", - "clientID": "clientID is the oauth client ID", - "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", - "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", - "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.", - "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.", - "issuer": "issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component.", - "claims": "claims mappings", -} - -func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { - return map_OpenIDIdentityProvider -} - -var map_RequestHeaderIdentityProvider = map[string]string{ - "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials", - "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when login is set to true.", - "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when challenge is set to true.", - "ca": "ca is a required reference to a config map by name containing the PEM-encoded CA bundle. 
It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key \"ca.crt\" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config.", - "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.", - "headers": "headers is the set of headers to check for identity information", - "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username", - "nameHeaders": "nameHeaders is the set of headers to check for the display name", - "emailHeaders": "emailHeaders is the set of headers to check for the email address", -} - -func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { - return map_RequestHeaderIdentityProvider -} - -var map_TokenConfig = map[string]string{ - "": "TokenConfig holds the necessary configuration options for authorization and access tokens", - "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", - "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.", - "accessTokenInactivityTimeout": "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime.\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", -} - -func (TokenConfig) SwaggerDoc() map[string]string { - return map_TokenConfig -} - -var map_HubSource = map[string]string{ - "": "HubSource is used to specify the hub source and its configuration", - "name": "name is the name of one of the default hub sources", - "disabled": "disabled is used to disable a default hub source on cluster", -} - -func (HubSource) SwaggerDoc() map[string]string { - return map_HubSource -} - -var map_HubSourceStatus = map[string]string{ - "": "HubSourceStatus is used to reflect the current state of applying the configuration to a default source", - "status": "status indicates success or failure in applying the configuration", - "message": "message provides more information regarding failures", -} - -func (HubSourceStatus) SwaggerDoc() map[string]string { - return map_HubSourceStatus -} - -var map_OperatorHub = map[string]string{ - "": "OperatorHub is the Schema for the operatorhubs API. 
It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (OperatorHub) SwaggerDoc() map[string]string { - return map_OperatorHub -} - -var map_OperatorHubList = map[string]string{ - "": "OperatorHubList contains a list of OperatorHub\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (OperatorHubList) SwaggerDoc() map[string]string { - return map_OperatorHubList -} - -var map_OperatorHubSpec = map[string]string{ - "": "OperatorHubSpec defines the desired state of OperatorHub", - "disableAllDefaultSources": "disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source.", - "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block.", -} - -func (OperatorHubSpec) SwaggerDoc() map[string]string { - return map_OperatorHubSpec -} - -var map_OperatorHubStatus = map[string]string{ - "": "OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here.", - "sources": "sources encapsulates the result of applying the configuration for each hub source", -} - -func (OperatorHubStatus) SwaggerDoc() map[string]string { - return map_OperatorHubStatus -} - -var map_Project = map[string]string{ - "": "Project holds cluster-wide information about Project. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", -} - -func (Project) SwaggerDoc() map[string]string { - return map_Project -} - -var map_ProjectList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (ProjectList) SwaggerDoc() map[string]string { - return map_ProjectList -} - -var map_ProjectSpec = map[string]string{ - "": "ProjectSpec holds the project creation configuration.", - "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", - "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used.", -} - -func (ProjectSpec) SwaggerDoc() map[string]string { - return map_ProjectSpec -} - -var map_TemplateReference = map[string]string{ - "": "TemplateReference references a template in a specific namespace. The namespace must be specified at the point of use.", - "name": "name is the metadata.name of the referenced project request template", -} - -func (TemplateReference) SwaggerDoc() map[string]string { - return map_TemplateReference -} - -var map_Proxy = map[string]string{ - "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec holds user-settable values for the proxy configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", -} - -func (Proxy) SwaggerDoc() map[string]string { - return map_Proxy -} - -var map_ProxyList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (ProxyList) SwaggerDoc() map[string]string { - return map_ProxyList -} - -var map_ProxySpec = map[string]string{ - "": "ProxySpec contains cluster proxy creation configuration.", - "httpProxy": "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.", - "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.", - "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var.", - "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.", - "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. 
Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well.\n\nThe namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |", -} - -func (ProxySpec) SwaggerDoc() map[string]string { - return map_ProxySpec -} - -var map_ProxyStatus = map[string]string{ - "": "ProxyStatus shows current known state of the cluster proxy.", - "httpProxy": "httpProxy is the URL of the proxy for HTTP requests.", - "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests.", - "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.", -} - -func (ProxyStatus) SwaggerDoc() map[string]string { - return map_ProxyStatus -} - -var map_ProfileCustomizations = map[string]string{ - "": "ProfileCustomizations contains various parameters for modifying the default behavior of certain profiles", - "dynamicResourceAllocation": "dynamicResourceAllocation allows to enable or disable dynamic resource allocation within the scheduler. Dynamic resource allocation is an API for requesting and sharing resources between pods and containers inside a pod. Third-party resource drivers are responsible for tracking and allocating resources. Different kinds of resources support arbitrary parameters for defining requirements and initialization. Valid values are Enabled, Disabled and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is Disabled.", -} - -func (ProfileCustomizations) SwaggerDoc() map[string]string { - return map_ProfileCustomizations -} - -var map_Scheduler = map[string]string{ - "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", -} - -func (Scheduler) SwaggerDoc() map[string]string { - return map_Scheduler -} - -var map_SchedulerList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (SchedulerList) SwaggerDoc() map[string]string { - return map_SchedulerList -} - -var map_SchedulerSpec = map[string]string{ - "policy": "DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. 
The namespace for this configmap is openshift-config.", - "profile": "profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods.\n\nValid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\"", - "profileCustomizations": "profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles.", - "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod's selector. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.", - "mastersSchedulable": "MastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.", -} - -func (SchedulerSpec) SwaggerDoc() map[string]string { - return map_SchedulerSpec -} - -var map_FeatureGateTests = map[string]string{ - "featureGate": "FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.", - "tests": "Tests contains an item for every TestName", -} - -func (FeatureGateTests) SwaggerDoc() map[string]string { - return map_FeatureGateTests -} - -var map_TestDetails = map[string]string{ - "testName": "TestName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.", -} - -func (TestDetails) SwaggerDoc() map[string]string { - return map_TestDetails -} - -var map_TestReporting = map[string]string{ - "": "TestReporting is used for origin (and potentially others) to report the test names for a given FeatureGate into the payload for later analysis on a per-payload basis. This doesn't need any CRD because it's never stored in the cluster.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "status": "status holds observed values from the cluster. 
They may not be overridden.", -} - -func (TestReporting) SwaggerDoc() map[string]string { - return map_TestReporting -} - -var map_TestReportingSpec = map[string]string{ - "testsForFeatureGates": "TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.", -} - -func (TestReportingSpec) SwaggerDoc() map[string]string { - return map_TestReportingSpec -} - -var map_CustomTLSProfile = map[string]string{ - "": "CustomTLSProfile is a user-defined TLS security profile. Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.", -} - -func (CustomTLSProfile) SwaggerDoc() map[string]string { - return map_CustomTLSProfile -} - -var map_IntermediateTLSProfile = map[string]string{ - "": "IntermediateTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29", -} - -func (IntermediateTLSProfile) SwaggerDoc() map[string]string { - return map_IntermediateTLSProfile -} - -var map_ModernTLSProfile = map[string]string{ - "": "ModernTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility", -} - -func (ModernTLSProfile) SwaggerDoc() map[string]string { - return map_ModernTLSProfile -} - -var map_OldTLSProfile = map[string]string{ - "": "OldTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility", -} - -func (OldTLSProfile) SwaggerDoc() map[string]string { - return map_OldTLSProfile -} - -var map_TLSProfileSpec = map[string]string{ - "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.", - "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA", - "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: VersionTLS11\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12", -} - -func (TLSProfileSpec) SwaggerDoc() map[string]string { - return map_TLSProfileSpec -} - -var map_TLSSecurityProfile = map[string]string{ - "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.", - "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. 
Depending on precisely which ciphers are available to a process, the list may be reduced.\n\nNote that the Modern profile is currently not supported because it is not yet well adopted by common software libraries.", - "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES256-GCM-SHA384\n\n - ECDHE-RSA-AES256-GCM-SHA384\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - DHE-RSA-AES128-GCM-SHA256\n\n - DHE-RSA-AES256-GCM-SHA384\n\n - DHE-RSA-CHACHA20-POLY1305\n\n - ECDHE-ECDSA-AES128-SHA256\n\n - ECDHE-RSA-AES128-SHA256\n\n - ECDHE-ECDSA-AES128-SHA\n\n - ECDHE-RSA-AES128-SHA\n\n - ECDHE-ECDSA-AES256-SHA384\n\n - ECDHE-RSA-AES256-SHA384\n\n - ECDHE-ECDSA-AES256-SHA\n\n - ECDHE-RSA-AES256-SHA\n\n - DHE-RSA-AES128-SHA256\n\n - DHE-RSA-AES256-SHA256\n\n - AES128-GCM-SHA256\n\n - AES256-GCM-SHA384\n\n - AES128-SHA256\n\n - AES256-SHA256\n\n - AES128-SHA\n\n - AES256-SHA\n\n - DES-CBC3-SHA\n\n minTLSVersion: VersionTLS10", - "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES256-GCM-SHA384\n\n - ECDHE-RSA-AES256-GCM-SHA384\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - DHE-RSA-AES128-GCM-SHA256\n\n - DHE-RSA-AES256-GCM-SHA384\n\n minTLSVersion: VersionTLS12", - "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n minTLSVersion: VersionTLS13", - "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this:\n\n ciphers:\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n minTLSVersion: VersionTLS11", -} - -func (TLSSecurityProfile) SwaggerDoc() map[string]string { - return map_TLSSecurityProfile -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE index e06d2081..74e6ec69 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE @@ -176,7 +176,7 @@ Apache License END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. - + To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include @@ -199,4 +199,3 @@ Apache License WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. - diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go index f99dedf4..5566305f 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go @@ -37,8 +37,15 @@ const ( // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".status.paused",description="Whether the resource reconciliation is paused or not",priority=1 // +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale -// Alertmanager describes an Alertmanager cluster. +// The `Alertmanager` custom resource definition (CRD) defines a desired [Alertmanager](https://prometheus.io/docs/alerting) setup to run in a Kubernetes cluster. It allows you to specify many options, such as the number of replicas and persistent storage. +// +// For each `Alertmanager` resource, the Operator deploys a `StatefulSet` in the same namespace. When there are two or more configured replicas, the Operator runs the Alertmanager instances in high-availability mode. +// +// The resource defines via label and namespace selectors which `AlertmanagerConfig` objects should be associated with the deployed Alertmanager instances. type Alertmanager struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -222,12 +229,14 @@ type AlertmanagerSpec struct { ForceEnableClusterMode bool `json:"forceEnableClusterMode,omitempty"` // AlertmanagerConfigs to be selected for to merge and configure Alertmanager with. AlertmanagerConfigSelector *metav1.LabelSelector `json:"alertmanagerConfigSelector,omitempty"` - // The AlertmanagerConfigMatcherStrategy defines how AlertmanagerConfig objects match the alerts. - // In the future more options may be added. - AlertmanagerConfigMatcherStrategy AlertmanagerConfigMatcherStrategy `json:"alertmanagerConfigMatcherStrategy,omitempty"` // Namespaces to be selected for AlertmanagerConfig discovery. If nil, only // check own namespace. AlertmanagerConfigNamespaceSelector *metav1.LabelSelector `json:"alertmanagerConfigNamespaceSelector,omitempty"` + + // AlertmanagerConfigMatcherStrategy defines how AlertmanagerConfig objects + // process incoming alerts. + AlertmanagerConfigMatcherStrategy AlertmanagerConfigMatcherStrategy `json:"alertmanagerConfigMatcherStrategy,omitempty"` + // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) @@ -263,16 +272,31 @@ type AlertmanagerSpec struct { EnableFeatures []string `json:"enableFeatures,omitempty"` } -// AlertmanagerConfigMatcherStrategy defines the strategy used by AlertmanagerConfig objects to match alerts.
type AlertmanagerConfigMatcherStrategy struct { - // If set to `OnNamespace`, the operator injects a label matcher matching the namespace of the AlertmanagerConfig object for all its routes and inhibition rules. - // `None` will not add any additional matchers other than the ones specified in the AlertmanagerConfig. - // Default is `OnNamespace`. + // AlertmanagerConfigMatcherStrategyType defines the strategy used by + // AlertmanagerConfig objects to match alerts in the routes and inhibition + // rules. + // + // The default value is `OnNamespace`. + // // +kubebuilder:validation:Enum="OnNamespace";"None" // +kubebuilder:default:="OnNamespace" - Type string `json:"type,omitempty"` + Type AlertmanagerConfigMatcherStrategyType `json:"type,omitempty"` } +type AlertmanagerConfigMatcherStrategyType string + +const ( + // With `OnNamespace`, the route and inhibition rules of an + // AlertmanagerConfig object only process alerts that have a `namespace` + // label equal to the namespace of the object. + OnNamespaceConfigMatcherStrategyType AlertmanagerConfigMatcherStrategyType = "OnNamespace" + + // With `None`, the route and inhibition rules of an AlertmanagerConfig + // object process all incoming alerts. + NoneConfigMatcherStrategyType AlertmanagerConfigMatcherStrategyType = "None" +) + // AlertmanagerConfiguration defines the Alertmanager configuration. // +k8s:openapi-gen=true type AlertmanagerConfiguration struct { @@ -336,6 +360,8 @@ type AlertmanagerStatus struct { AvailableReplicas int32 `json:"availableReplicas"` // Total number of unavailable pods targeted by this Alertmanager object. UnavailableReplicas int32 `json:"unavailableReplicas"` + // The selector used to match the pods targeted by this Alertmanager object. + Selector string `json:"selector,omitempty"` // The current state of the Alertmanager object. // +listType=map // +listMapKey=type diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go index aa021750..5a3ced2f 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go @@ -31,7 +31,14 @@ const ( // +k8s:openapi-gen=true // +kubebuilder:resource:categories="prometheus-operator",shortName="pmon" -// PodMonitor defines monitoring for a set of pods. +// The `PodMonitor` custom resource definition (CRD) defines how `Prometheus` and `PrometheusAgent` can scrape metrics from a group of pods. +// Among other things, it allows you to specify: +// * The pods to scrape via label selectors. +// * The container ports to scrape. +// * Authentication credentials to use. +// * Target and metric relabeling. +// +// `Prometheus` and `PrometheusAgent` objects select `PodMonitor` objects using label and namespace selectors. type PodMonitor struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -64,15 +71,15 @@ type PodMonitorSpec struct { // PodTargetLabels []string `json:"podTargetLabels,omitempty"` - // List of endpoints part of this PodMonitor. + // Defines how to scrape metrics from the selected pods. // // +optional PodMetricsEndpoints []PodMetricsEndpoint `json:"podMetricsEndpoints"` - // Label selector to select the Kubernetes `Pod` objects.
+ // Label selector to select the Kubernetes `Pod` objects to scrape metrics from. Selector metav1.LabelSelector `json:"selector"` - // Selector to select which namespaces the Kubernetes `Pods` objects - // are discovered from. + // `namespaceSelector` defines in which namespace(s) Prometheus should discover the pods. + // By default, the pods are discovered in the same namespace as the `PodMonitor` object, but it is possible to select pods across different/all namespaces. NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` // `sampleLimit` defines a per-scrape limit on the number of scraped samples @@ -127,7 +134,7 @@ type PodMonitorSpec struct { // `attachMetadata` defines additional metadata which is added to the // discovered targets. // - // It requires Prometheus >= v2.37.0. + // It requires Prometheus >= v2.35.0. // // +optional AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go index 4e8427c6..203f7207 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go @@ -30,7 +30,13 @@ const ( // +k8s:openapi-gen=true // +kubebuilder:resource:categories="prometheus-operator",shortName="prb" -// Probe defines monitoring for a set of static targets or ingresses. +// The `Probe` custom resource definition (CRD) defines how to scrape metrics from prober exporters such as the [blackbox exporter](https://github.com/prometheus/blackbox_exporter). +// +// The `Probe` resource needs two pieces of information: +// * The list of probed addresses, which can be defined statically or by discovering Kubernetes Ingress objects. +// * The prober, which exposes the availability of probed endpoints (over various protocols such as HTTP, TCP, ICMP, ...) as Prometheus metrics. +// +// `Prometheus` and `PrometheusAgent` objects select `Probe` objects using label and namespace selectors. type Probe struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go index e22d785f..b3baf248 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go @@ -368,6 +368,7 @@ type CommonPrometheusFields struct { // object, which shall be mounted into the Prometheus Pods. // Each Secret is added to the StatefulSet definition as a volume named `secret-`. // The Secrets are mounted into /etc/prometheus/secrets/ in the 'prometheus' container. + // +listType:=set Secrets []string `json:"secrets,omitempty"` // ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus // object, which shall be mounted into the Prometheus Pods.
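As an illustration of the `Probe` documentation added in probe_types.go above: a minimal manifest pairing a statically defined probed address with a blackbox-exporter prober might look like the sketch below (the name, namespace, exporter URL, and module are illustrative assumptions, not part of this patch):

```yaml
apiVersion: monitoring.coreos.com/v1
kind: Probe
metadata:
  name: example-website        # hypothetical name
  namespace: monitoring        # hypothetical namespace
spec:
  prober:
    # Hypothetical blackbox exporter service that exposes probe results as metrics.
    url: blackbox-exporter.monitoring.svc:9115
  module: http_2xx             # assumes an "http_2xx" module exists in the exporter config
  targets:
    staticConfig:
      static:
        - https://example.com  # statically defined probed address
```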
@@ -771,6 +772,15 @@ type CommonPrometheusFields struct { // +listType=map // +listMapKey=name ScrapeClasses []ScrapeClass `json:"scrapeClasses,omitempty"` + + // Defines the service discovery role used to discover targets from + // `ServiceMonitor` objects and Alertmanager endpoints. + // + // If set, the value should be either "Endpoints" or "EndpointSlice". + // If unset, the operator assumes the "Endpoints" role. + // + // +optional + ServiceDiscoveryRole *ServiceDiscoveryRole `json:"serviceDiscoveryRole,omitempty"` } // +kubebuilder:validation:Enum=HTTP;ProcessSignal @@ -784,6 +794,14 @@ const ( ProcessSignalReloadStrategyType ReloadStrategyType = "ProcessSignal" ) +// +kubebuilder:validation:Enum=Endpoints;EndpointSlice +type ServiceDiscoveryRole string + +const ( + EndpointsRole ServiceDiscoveryRole = "Endpoints" + EndpointSliceRole ServiceDiscoveryRole = "EndpointSlice" +) + func (cpf *CommonPrometheusFields) PrometheusURIScheme() string { if cpf.Web != nil && cpf.Web.TLSConfig != nil { return "https" @@ -815,7 +833,13 @@ func (cpf *CommonPrometheusFields) WebRoutePrefix() string { // +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale -// Prometheus defines a Prometheus deployment. +// The `Prometheus` custom resource definition (CRD) defines a desired [Prometheus](https://prometheus.io/docs/prometheus) setup to run in a Kubernetes cluster. It allows you to specify many options, such as the number of replicas, persistent storage, and the Alertmanagers to which firing alerts should be sent. +// +// For each `Prometheus` resource, the Operator deploys one or several `StatefulSet` objects in the same namespace. The number of StatefulSets is equal to the number of shards, which is 1 by default. +// +// The resource defines via label and namespace selectors which `ServiceMonitor`, `PodMonitor`, `Probe` and `PrometheusRule` objects should be associated with the deployed Prometheus instances. +// +// The Operator continuously reconciles the scrape and rules configuration, and a sidecar container running in the Prometheus pods triggers a reload of the configuration when needed. type Prometheus struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -1060,7 +1084,7 @@ type PrometheusStatus struct { // AlertingSpec defines parameters for alerting configuration of Prometheus servers. // +k8s:openapi-gen=true type AlertingSpec struct { - // AlertmanagerEndpoints Prometheus should fire alerts against. + // Alertmanager endpoints to which Prometheus should send alerts. Alertmanagers []AlertmanagerEndpoints `json:"alertmanagers"` } @@ -1705,8 +1729,18 @@ type APIServerConfig struct { // +k8s:openapi-gen=true type AlertmanagerEndpoints struct { // Namespace of the Endpoints object. - Namespace string `json:"namespace"` + // + // If not set, the object will be discovered in the namespace of the + // Prometheus object. + // + // +kubebuilder:validation:MinLength:=1 + // +optional + Namespace *string `json:"namespace,omitempty"` + // Name of the Endpoints object in the namespace. + // + // +kubebuilder:validation:MinLength:=1 + // +required Name string `json:"name"` // Port on which the Alertmanager API is exposed.
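The new `serviceDiscoveryRole` field and the now-optional `alertmanagers[].namespace` from the hunks above can be combined as in this minimal sketch of a `Prometheus` spec fragment (the resource and port names are illustrative assumptions):

```yaml
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: example                 # hypothetical name
spec:
  # Opt in to EndpointSlice-based discovery; when unset,
  # the operator assumes the "Endpoints" role.
  serviceDiscoveryRole: EndpointSlice
  alerting:
    alertmanagers:
      - name: alertmanager-main # name of the Endpoints object (required)
        port: web               # namespace omitted: discovered in the
                                # Prometheus object's own namespace
```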
@@ -1969,4 +2003,11 @@ type ScrapeClass struct { // // +optional MetricRelabelings []RelabelConfig `json:"metricRelabelings,omitempty"` + + // AttachMetadata attaches additional metadata to the discovered targets. + // When the scrape object defines its own configuration, it takes + // precedence over the scrape class configuration. + // + // +optional + AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` } diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go index 8c9a4afe..f0a1cf4b 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheusrule_types.go @@ -30,7 +30,9 @@ const ( // +k8s:openapi-gen=true // +kubebuilder:resource:categories="prometheus-operator",shortName="promrule" -// PrometheusRule defines recording and alerting rules for a Prometheus instance +// The `PrometheusRule` custom resource definition (CRD) defines [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) and [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) rules to be evaluated by `Prometheus` or `ThanosRuler` objects. +// +// `Prometheus` and `ThanosRuler` objects select `PrometheusRule` objects using label and namespace selectors. type PrometheusRule struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go index 8002a132..2b977a33 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go @@ -29,7 +29,14 @@ const ( // +k8s:openapi-gen=true // +kubebuilder:resource:categories="prometheus-operator",shortName="smon" -// ServiceMonitor defines monitoring for a set of services. +// The `ServiceMonitor` custom resource definition (CRD) defines how `Prometheus` and `PrometheusAgent` can scrape metrics from a group of services. +// Among other things, it allows you to specify: +// * The services to scrape via label selectors. +// * The container ports to scrape. +// * Authentication credentials to use. +// * Target and metric relabeling. +// +// `Prometheus` and `PrometheusAgent` objects select `ServiceMonitor` objects using label and namespace selectors. type ServiceMonitor struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -70,14 +77,14 @@ type ServiceMonitorSpec struct { PodTargetLabels []string `json:"podTargetLabels,omitempty"` // List of endpoints part of this ServiceMonitor. - // - // +optional + // Defines how to scrape metrics from Kubernetes [Endpoints](https://kubernetes.io/docs/concepts/services-networking/service/#endpoints) objects. + // In most cases, an Endpoints object is backed by a Kubernetes [Service](https://kubernetes.io/docs/concepts/services-networking/service/) object with the same name and labels.
Endpoints []Endpoint `json:"endpoints"` - // Label selector to select the Kubernetes `Endpoints` objects. + // Label selector to select the Kubernetes `Endpoints` objects to scrape metrics from. Selector metav1.LabelSelector `json:"selector"` - // Selector to select which namespaces the Kubernetes `Endpoints` objects - // are discovered from. + // `namespaceSelector` defines in which namespace(s) Prometheus should discover the services. + // By default, the services are discovered in the same namespace as the `ServiceMonitor` object, but it is possible to select services across different/all namespaces. NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` // `sampleLimit` defines a per-scrape limit on the number of scraped samples diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go index 82c569ee..14908da1 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go @@ -38,7 +38,11 @@ const ( // +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".status.paused",description="Whether the resource reconciliation is paused or not",priority=1 // +kubebuilder:subresource:status -// ThanosRuler defines a ThanosRuler deployment. +// The `ThanosRuler` custom resource definition (CRD) defines a desired [Thanos Ruler](https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md) setup to run in a Kubernetes cluster. +// +// A `ThanosRuler` instance requires at least one compatible Prometheus API endpoint (either Thanos Querier or Prometheus services). +// +// The resource defines via label and namespace selectors which `PrometheusRule` objects should be associated with the deployed Thanos Ruler instances. type ThanosRuler struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go index 5676566f..62d6e29f 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go @@ -17,6 +17,7 @@ package v1 import ( "errors" "fmt" + "strings" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -531,8 +532,11 @@ type Endpoint struct { } type AttachMetadata struct { - // When set to true, Prometheus must have the `get` permission on the - // `Nodes` objects. + // When set to true, Prometheus attaches node metadata to the discovered + // targets. + // + // The Prometheus service account must have the `list` and `watch` + // permissions on the `Nodes` objects. // // +optional Node *bool `json:"node,omitempty"` @@ -565,6 +569,19 @@ type OAuth2 struct { // // +optional EndpointParams map[string]string `json:"endpointParams,omitempty"` + + // TLS configuration to use when connecting to the OAuth2 server. + // It requires Prometheus >= v2.43.0. + // + // +optional + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` + + // Proxy configuration to use when connecting to the OAuth2 server. + // It requires Prometheus >= v2.43.0.
+ // It is not supported yet for Alertmanager. + // + // +optional + ProxyConfig `json:",inline"` } type OAuth2ValidationError struct { @@ -590,6 +607,12 @@ func (o *OAuth2) Validate() error { } } + if err := o.TLSConfig.Validate(); err != nil { + return &OAuth2ValidationError{ + err: fmt.Sprintf("invalid OAuth2 tlsConfig: %s", err.Error()), + } + } + return nil } @@ -642,6 +665,16 @@ func (c *SecretOrConfigMap) String() string { return "" } +// +kubebuilder:validation:Enum=TLS10;TLS11;TLS12;TLS13 +type TLSVersion string + +const ( + TLSVersion10 TLSVersion = "TLS10" + TLSVersion11 TLSVersion = "TLS11" + TLSVersion12 TLSVersion = "TLS12" + TLSVersion13 TLSVersion = "TLS13" +) + // SafeTLSConfig specifies safe TLS configuration parameters. // +k8s:openapi-gen=true type SafeTLSConfig struct { @@ -655,16 +688,32 @@ type SafeTLSConfig struct { KeySecret *v1.SecretKeySelector `json:"keySecret,omitempty"` // Used to verify the hostname for the targets. - //+optional + // +optional ServerName *string `json:"serverName,omitempty"` // Disable target certificate validation. - //+optional + // +optional InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"` + + // Minimum acceptable TLS version. + // + // It requires Prometheus >= v2.35.0. + // +optional + MinVersion *TLSVersion `json:"minVersion,omitempty"` + + // Maximum acceptable TLS version. + // + // It requires Prometheus >= v2.41.0. + // +optional + MaxVersion *TLSVersion `json:"maxVersion,omitempty"` } // Validate semantically validates the given SafeTLSConfig. func (c *SafeTLSConfig) Validate() error { + if c == nil { + return nil + } + if c.CA != (SecretOrConfigMap{}) { if err := c.CA.Validate(); err != nil { return fmt.Errorf("ca %s: %w", c.CA.String(), err) @@ -685,6 +734,10 @@ func (c *SafeTLSConfig) Validate() error { return fmt.Errorf("client key specified without client cert") } + if c.MaxVersion != nil && c.MinVersion != nil && strings.Compare(string(*c.MaxVersion), string(*c.MinVersion)) == -1 { + return fmt.Errorf("maxVersion must be greater than or equal to minVersion") + } + return nil } @@ -702,6 +755,10 @@ type TLSConfig struct { // Validate semantically validates the given TLSConfig. func (c *TLSConfig) Validate() error { + if c == nil { + return nil + } + if c.CA != (SecretOrConfigMap{}) { if c.CAFile != "" { return fmt.Errorf("cannot specify both caFile and ca") @@ -735,6 +792,10 @@ func (c *TLSConfig) Validate() error { return fmt.Errorf("cannot specify client key without client cert") } + if c.MaxVersion != nil && c.MinVersion != nil && strings.Compare(string(*c.MaxVersion), string(*c.MinVersion)) == -1 { + return fmt.Errorf("maxVersion must be greater than or equal to minVersion") + } + return nil } diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go index 51c19b2b..bf72f145 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go @@ -141,6 +141,11 @@ func (in *AlertmanagerConfiguration) DeepCopy() *AlertmanagerConfiguration { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AlertmanagerEndpoints) DeepCopyInto(out *AlertmanagerEndpoints) { *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } out.Port = in.Port if in.TLSConfig != nil { in, out := &in.TLSConfig, &out.TLSConfig @@ -380,12 +385,12 @@ func (in *AlertmanagerSpec) DeepCopyInto(out *AlertmanagerSpec) { *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } - out.AlertmanagerConfigMatcherStrategy = in.AlertmanagerConfigMatcherStrategy if in.AlertmanagerConfigNamespaceSelector != nil { in, out := &in.AlertmanagerConfigNamespaceSelector, &out.AlertmanagerConfigNamespaceSelector *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } + out.AlertmanagerConfigMatcherStrategy = in.AlertmanagerConfigMatcherStrategy if in.MinReadySeconds != nil { in, out := &in.MinReadySeconds, &out.MinReadySeconds *out = new(uint32) @@ -969,6 +974,11 @@ func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ServiceDiscoveryRole != nil { + in, out := &in.ServiceDiscoveryRole, &out.ServiceDiscoveryRole + *out = new(ServiceDiscoveryRole) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonPrometheusFields. @@ -1412,6 +1422,12 @@ func (in *OAuth2) DeepCopyInto(out *OAuth2) { (*out)[key] = val } } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth2. @@ -2670,6 +2686,16 @@ func (in *SafeTLSConfig) DeepCopyInto(out *SafeTLSConfig) { *out = new(bool) **out = **in } + if in.MinVersion != nil { + in, out := &in.MinVersion, &out.MinVersion + *out = new(TLSVersion) + **out = **in + } + if in.MaxVersion != nil { + in, out := &in.MaxVersion, &out.MaxVersion + *out = new(TLSVersion) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SafeTLSConfig. @@ -2709,6 +2735,11 @@ func (in *ScrapeClass) DeepCopyInto(out *ScrapeClass) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AttachMetadata != nil { + in, out := &in.AttachMetadata, &out.AttachMetadata + *out = new(AttachMetadata) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScrapeClass. diff --git a/vendor/github.com/x448/float16/.travis.yml b/vendor/github.com/x448/float16/.travis.yml new file mode 100644 index 00000000..8902bdaa --- /dev/null +++ b/vendor/github.com/x448/float16/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.11.x + +env: + - GO111MODULE=on + +script: + - go test -short -coverprofile=coverage.txt -covermode=count ./... 
+ +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/x448/float16/LICENSE b/vendor/github.com/x448/float16/LICENSE new file mode 100644 index 00000000..bf6e3578 --- /dev/null +++ b/vendor/github.com/x448/float16/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/x448/float16/README.md b/vendor/github.com/x448/float16/README.md new file mode 100644 index 00000000..b524b813 --- /dev/null +++ b/vendor/github.com/x448/float16/README.md @@ -0,0 +1,133 @@ +# Float16 (Binary16) in Go/Golang +[![Build Status](https://travis-ci.org/x448/float16.svg?branch=master)](https://travis-ci.org/x448/float16) +[![codecov](https://codecov.io/gh/x448/float16/branch/master/graph/badge.svg?v=4)](https://codecov.io/gh/x448/float16) +[![Go Report Card](https://goreportcard.com/badge/github.com/x448/float16)](https://goreportcard.com/report/github.com/x448/float16) +[![Release](https://img.shields.io/github/release/x448/float16.svg?style=flat-square)](https://github.com/x448/float16/releases) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/x448/float16/master/LICENSE) + +The `float16` package provides the [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16. + +IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result. + +All possible 4+ billion floating-point conversions with this library are verified to be correct. + +Lowercase "float16" refers to IEEE 754 binary16, while capitalized "Float16" refers to the exported Go data type provided by this library. + +## Features +Current features include: + +* float16 to float32 conversions use lossless conversion. +* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven". +* conversions using pure Go take about 2.65 ns/op on a desktop amd64. +* unit tests provide 100% code coverage and check all possible 4+ billion conversions. +* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc. +* all functions in this library use zero allocs except String().
+ +## Status +This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published. + +Current status: + +* core API is done and breaking API changes are unlikely. +* 100% of unit tests pass: + * short mode (`go test -short`) tests around 65765 conversions in 0.005s. + * normal mode (`go test`) tests all possible 4+ billion conversions in about 95s. +* 100% code coverage with both short mode and normal mode. +* tested on amd64 but it should work on all little-endian platforms supported by Go. + +Roadmap: + +* add functions for fast batch conversions leveraging SIMD when supported by hardware. +* speed up unit test when verifying all possible 4+ billion conversions. +* test on additional platforms. + +## Float16 to Float32 Conversion +Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct. + +Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions. + +## Float32 to Float16 Conversion +Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct. + +Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32(). + +Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage. + +## Usage +Install with `go get github.com/x448/float16`. +``` +// Convert float32 to float16 +pi := float32(math.Pi) +pi16 := float16.Fromfloat32(pi) + +// Convert float16 to float32 +pi32 := pi16.Float32() + +// PrecisionFromfloat32() is faster than the overhead of calling a function. +// This example only converts if there's no data loss and input is not a subnormal. +if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact { + pi16 := float16.Fromfloat32(pi) +} +``` + +## Float16 Type and API +Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods. +``` +package float16 // import "github.com/x448/float16" + +// Exported types and consts +type Float16 uint16 +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +// Exported functions +Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding + with identical results to AMD and Intel F16C hardware. NaN inputs + are converted with quiet bit always set on, to be like F16C. + +FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit. + // The "ps" suffix means "preserve signaling". + // Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN. + +Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.) 
+NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number +Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign + +PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow + // (inline and < 1 ns/op) +// Exported methods +(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion +(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f +(f Float16) IsNaN() bool // true if f is not-a-number (NaN) +(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN) +(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf) +(f Float16) IsFinite() bool // true if f is not infinite or NaN +(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN. +(f Float16) Signbit() bool // true if f is negative or negative zero +(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface +``` +See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info. + +## Benchmarks +Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value. + +``` +All functions have zero allocations except float16.String(). + +FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16 +ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32 +Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16 + +PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc. +``` + +## System Requirements +* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions. +* Tested on amd64 but it should also work on all little-endian platforms supported by Go. + +## Special Thanks +Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16. + +## License +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Licensed under [MIT License](LICENSE) diff --git a/vendor/github.com/x448/float16/float16.go b/vendor/github.com/x448/float16/float16.go new file mode 100644 index 00000000..1a0e6dad --- /dev/null +++ b/vendor/github.com/x448/float16/float16.go @@ -0,0 +1,302 @@ +// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker +// +// Special thanks to Kathryn Long for her Rust implementation +// of float16 at github.com/starkat99/half-rs (MIT license) + +package float16 + +import ( + "math" + "strconv" +) + +// Float16 represents IEEE 754 half-precision floating-point numbers (binary16). +type Float16 uint16 + +// Precision indicates whether the conversion to Float16 is +// exact, subnormal without dropped bits, inexact, underflow, or overflow. +type Precision int + +const ( + + // PrecisionExact is for non-subnormals that don't drop bits during conversion. + // All of these can round-trip. Should always convert to float16. + PrecisionExact Precision = iota + + // PrecisionUnknown is for subnormals that don't drop bits during conversion but + // not all of these can round-trip so precision is unknown without more effort. + // Only 2046 of these can round-trip and the rest cannot round-trip. + PrecisionUnknown + + // PrecisionInexact is for dropped significand bits and cannot round-trip. + // Some of these are subnormals. 
Cannot round-trip float32->float16->float32. + PrecisionInexact + + // PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32. + PrecisionUnderflow + + // PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32. + PrecisionOverflow +) + +// PrecisionFromfloat32 returns Precision without performing +// the conversion. Conversions from both Infinity and NaN +// values will always report PrecisionExact even if NaN payload +// or NaN-Quiet-Bit is lost. This function is kept simple to +// allow inlining and run < 0.5 ns/op, to serve as a fast filter. +func PrecisionFromfloat32(f32 float32) Precision { + u32 := math.Float32bits(f32) + + if u32 == 0 || u32 == 0x80000000 { + // +- zero will always be exact conversion + return PrecisionExact + } + + const COEFMASK uint32 = 0x7fffff // 23 least significant bits + const EXPSHIFT uint32 = 23 + const EXPBIAS uint32 = 127 + const EXPMASK uint32 = uint32(0xff) << EXPSHIFT + const DROPMASK uint32 = COEFMASK >> 10 + + exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS) + coef := u32 & COEFMASK + + if exp == 128 { + // +- infinity or NaN + // apps may want to do extra checks for NaN separately + return PrecisionExact + } + + // https://en.wikipedia.org/wiki/Half-precision_floating-point_format says, + // "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24" + if exp < -24 { + return PrecisionUnderflow + } + if exp > 15 { + return PrecisionOverflow + } + if (coef & DROPMASK) != uint32(0) { + // these include subnormals and non-subnormals that dropped bits + return PrecisionInexact + } + + if exp < -14 { + // Subnormals. Caller may want to test these further. + // There are 2046 subnormals that can successfully round-trip f32->f16->f32 + // and 20 of those 2046 have 32-bit input coef == 0. + // RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value" + // so some protocols and libraries will choose to handle subnormals differently + // when deciding to encode them to CBOR float32 vs float16. + return PrecisionUnknown + } + + return PrecisionExact +} + +// Frombits returns the float16 number corresponding to the IEEE 754 binary16 +// representation u16, with the sign bit of u16 and the result in the same bit +// position. Frombits(Bits(x)) == x. +func Frombits(u16 uint16) Float16 { + return Float16(u16) +} + +// Fromfloat32 returns a Float16 value converted from f32. Conversion uses +// IEEE default rounding (nearest int, with ties to even). +func Fromfloat32(f32 float32) Float16 { + return Float16(f32bitsToF16bits(math.Float32bits(f32))) +} + +// ErrInvalidNaNValue indicates a NaN was not received. +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +type float16Error string + +func (e float16Error) Error() string { return string(e) } + +// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both +// signaling and payload. Unlike Fromfloat32(), which can only return +// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN. +// If the result is infinity (sNaN with empty payload), then the +// lowest bit of payload is set to make the result a NaN. +// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN. +// This function was kept simple to be able to inline. 
+func FromNaN32ps(nan float32) (Float16, error) { + const SNAN = Float16(uint16(0x7c01)) // signalling NaN + + u32 := math.Float32bits(nan) + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if (exp != 0x7f800000) || (coef == 0) { + return SNAN, ErrInvalidNaNValue + } + + u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13)) + + if (u16 & 0x03ff) == 0 { + // result became infinity, make it NaN by setting lowest bit in payload + u16 = u16 | 0x0001 + } + + return Float16(u16), nil +} + +// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN). +// Returned NaN value 0x7e01 has all exponent bits = 1 with the +// first and last bits = 1 in the significand. This is consistent +// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00. +func NaN() Float16 { + return Float16(0x7e01) +} + +// Inf returns a Float16 with an infinity value with the specified sign. +// A sign >= 0 returns positive infinity. +// A sign < 0 returns negative infinity. +func Inf(sign int) Float16 { + if sign >= 0 { + return Float16(0x7c00) + } + return Float16(0x8000 | 0x7c00) +} + +// Float32 returns a float32 converted from f (Float16). +// This is a lossless conversion. +func (f Float16) Float32() float32 { + u32 := f16bitsToF32bits(uint16(f)) + return math.Float32frombits(u32) +} + +// Bits returns the IEEE 754 binary16 representation of f, with the sign bit +// of f and the result in the same bit position. Bits(Frombits(x)) == x. +func (f Float16) Bits() uint16 { + return uint16(f) +} + +// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value. +func (f Float16) IsNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) +} + +// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16 +// “not-a-number” value. +func (f Float16) IsQuietNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0) +} + +// IsInf reports whether f is an infinity (inf). +// A sign > 0 reports whether f is positive inf. +// A sign < 0 reports whether f is negative inf. +// A sign == 0 reports whether f is either inf. +func (f Float16) IsInf(sign int) bool { + return ((f == 0x7c00) && sign >= 0) || + (f == 0xfc00 && sign <= 0) +} + +// IsFinite returns true if f is neither infinite nor NaN. +func (f Float16) IsFinite() bool { + return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00) +} + +// IsNormal returns true if f is neither zero, infinite, subnormal, or NaN. +func (f Float16) IsNormal() bool { + exp := uint16(f) & uint16(0x7c00) + return (exp != uint16(0x7c00)) && (exp != 0) +} + +// Signbit reports whether f is negative or negative zero. +func (f Float16) Signbit() bool { + return (uint16(f) & uint16(0x8000)) != 0 +} + +// String satisfies the fmt.Stringer interface. +func (f Float16) String() string { + return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32) +} + +// f16bitsToF32bits returns uint32 (float32 bits) converted from specified uint16. +func f16bitsToF32bits(in uint16) uint32 { + // All 65536 conversions with this were confirmed to be correct + // by Montgomery Edwards⁴⁴⁸ (github.com/x448).
+ + sign := uint32(in&0x8000) << 16 // sign for 32-bit + exp := uint32(in&0x7c00) >> 10 // exponenent for 16-bit + coef := uint32(in&0x03ff) << 13 // significand for 32-bit + + if exp == 0x1f { + if coef == 0 { + // infinity + return sign | 0x7f800000 | coef + } + // NaN + return sign | 0x7fc00000 | coef + } + + if exp == 0 { + if coef == 0 { + // zero + return sign + } + + // normalize subnormal numbers + exp++ + for coef&0x7f800000 == 0 { + coef <<= 1 + exp-- + } + coef &= 0x007fffff + } + + return sign | ((exp + (0x7f - 0xf)) << 23) | coef +} + +// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32. +// Conversion rounds to nearest integer with ties to even. +func f32bitsToF16bits(u32 uint32) uint16 { + // Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448). + // All 4294967296 conversions with this were confirmed to be correct by x448. + // Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license. + + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if exp == 0x7f800000 { + // NaN or Infinity + nanBit := uint32(0) + if coef != 0 { + nanBit = uint32(0x0200) + } + return uint16((sign >> 16) | uint32(0x7c00) | nanBit | (coef >> 13)) + } + + halfSign := sign >> 16 + + unbiasedExp := int32(exp>>23) - 127 + halfExp := unbiasedExp + 15 + + if halfExp >= 0x1f { + return uint16(halfSign | uint32(0x7c00)) + } + + if halfExp <= 0 { + if 14-halfExp > 24 { + return uint16(halfSign) + } + coef := coef | uint32(0x00800000) + halfCoef := coef >> uint32(14-halfExp) + roundBit := uint32(1) << uint32(13-halfExp) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + halfCoef++ + } + return uint16(halfSign | halfCoef) + } + + uHalfExp := uint32(halfExp) << 10 + halfCoef := coef >> 13 + roundBit := uint32(0x00001000) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + return uint16((halfSign | uHalfExp | halfCoef) + 1) + } + return uint16(halfSign | uHalfExp | halfCoef) +} diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE deleted file mode 100644 index 2a7cf70d..00000000 --- a/vendor/golang.org/x/exp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google LLC nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/exp/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go deleted file mode 100644 index 2c033dff..00000000 --- a/vendor/golang.org/x/exp/constraints/constraints.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package constraints defines a set of useful constraints to be used -// with type parameters. -package constraints - -// Signed is a constraint that permits any signed integer type. -// If future releases of Go add new predeclared signed integer types, -// this constraint will be modified to include them. -type Signed interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 -} - -// Unsigned is a constraint that permits any unsigned integer type. -// If future releases of Go add new predeclared unsigned integer types, -// this constraint will be modified to include them. -type Unsigned interface { - ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} - -// Integer is a constraint that permits any integer type. -// If future releases of Go add new predeclared integer types, -// this constraint will be modified to include them. -type Integer interface { - Signed | Unsigned -} - -// Float is a constraint that permits any floating-point type. -// If future releases of Go add new predeclared floating-point types, -// this constraint will be modified to include them. 
-type Float interface { - ~float32 | ~float64 -} - -// Complex is a constraint that permits any complex numeric type. -// If future releases of Go add new predeclared complex numeric types, -// this constraint will be modified to include them. -type Complex interface { - ~complex64 | ~complex128 -} - -// Ordered is a constraint that permits any ordered type: any type -// that supports the operators < <= >= >. -// If future releases of Go add new ordered types, -// this constraint will be modified to include them. -type Ordered interface { - Integer | Float | ~string -} diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go deleted file mode 100644 index fbf1934a..00000000 --- a/vendor/golang.org/x/exp/slices/cmp.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import "golang.org/x/exp/constraints" - -// min is a version of the predeclared function from the Go 1.21 release. -func min[T constraints.Ordered](a, b T) T { - if a < b || isNaN(a) { - return a - } - return b -} - -// max is a version of the predeclared function from the Go 1.21 release. -func max[T constraints.Ordered](a, b T) T { - if a > b || isNaN(a) { - return a - } - return b -} - -// cmpLess is a copy of cmp.Less from the Go 1.21 release. -func cmpLess[T constraints.Ordered](x, y T) bool { - return (isNaN(x) && !isNaN(y)) || x < y -} - -// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. -func cmpCompare[T constraints.Ordered](x, y T) int { - xNaN := isNaN(x) - yNaN := isNaN(y) - if xNaN && yNaN { - return 0 - } - if xNaN || x < y { - return -1 - } - if yNaN || x > y { - return +1 - } - return 0 -} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go deleted file mode 100644 index 46ceac34..00000000 --- a/vendor/golang.org/x/exp/slices/slices.go +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package slices defines various functions useful with slices of any type. -package slices - -import ( - "unsafe" - - "golang.org/x/exp/constraints" -) - -// Equal reports whether two slices are equal: the same length and all -// elements equal. If the lengths are different, Equal returns false. -// Otherwise, the elements are compared in increasing index order, and the -// comparison stops at the first unequal pair. -// Floating point NaNs are not considered equal. -func Equal[S ~[]E, E comparable](s1, s2 S) bool { - if len(s1) != len(s2) { - return false - } - for i := range s1 { - if s1[i] != s2[i] { - return false - } - } - return true -} - -// EqualFunc reports whether two slices are equal using an equality -// function on each pair of elements. If the lengths are different, -// EqualFunc returns false. Otherwise, the elements are compared in -// increasing index order, and the comparison stops at the first index -// for which eq returns false. -func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { - if len(s1) != len(s2) { - return false - } - for i, v1 := range s1 { - v2 := s2[i] - if !eq(v1, v2) { - return false - } - } - return true -} - -// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair -// of elements. 
The elements are compared sequentially, starting at index 0, -// until one element is not equal to the other. -// The result of comparing the first non-matching elements is returned. -// If both slices are equal until one of them ends, the shorter slice is -// considered less than the longer one. -// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { - for i, v1 := range s1 { - if i >= len(s2) { - return +1 - } - v2 := s2[i] - if c := cmpCompare(v1, v2); c != 0 { - return c - } - } - if len(s1) < len(s2) { - return -1 - } - return 0 -} - -// CompareFunc is like [Compare] but uses a custom comparison function on each -// pair of elements. -// The result is the first non-zero result of cmp; if cmp always -// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), -// and +1 if len(s1) > len(s2). -func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { - for i, v1 := range s1 { - if i >= len(s2) { - return +1 - } - v2 := s2[i] - if c := cmp(v1, v2); c != 0 { - return c - } - } - if len(s1) < len(s2) { - return -1 - } - return 0 -} - -// Index returns the index of the first occurrence of v in s, -// or -1 if not present. -func Index[S ~[]E, E comparable](s S, v E) int { - for i := range s { - if v == s[i] { - return i - } - } - return -1 -} - -// IndexFunc returns the first index i satisfying f(s[i]), -// or -1 if none do. -func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { - for i := range s { - if f(s[i]) { - return i - } - } - return -1 -} - -// Contains reports whether v is present in s. -func Contains[S ~[]E, E comparable](s S, v E) bool { - return Index(s, v) >= 0 -} - -// ContainsFunc reports whether at least one -// element e of s satisfies f(e). -func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { - return IndexFunc(s, f) >= 0 -} - -// Insert inserts the values v... into s at index i, -// returning the modified slice. -// The elements at s[i:] are shifted up to make room. -// In the returned slice r, r[i] == v[0], -// and r[i+len(v)] == value originally at r[i]. -// Insert panics if i is out of range. -// This function is O(len(s) + len(v)). -func Insert[S ~[]E, E any](s S, i int, v ...E) S { - m := len(v) - if m == 0 { - return s - } - n := len(s) - if i == n { - return append(s, v...) - } - if n+m > cap(s) { - // Use append rather than make so that we bump the size of - // the slice up to the next storage class. - // This is what Grow does but we don't call Grow because - // that might copy the values twice. - s2 := append(s[:i], make(S, n+m-i)...) - copy(s2[i:], v) - copy(s2[i+m:], s[i:]) - return s2 - } - s = s[:n+m] - - // before: - // s: aaaaaaaabbbbccccccccdddd - // ^ ^ ^ ^ - // i i+m n n+m - // after: - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // - // a are the values that don't move in s. - // v are the values copied in from v. - // b and c are the values from s that are shifted up in index. - // d are the values that get overwritten, never to be seen again. - - if !overlaps(v, s[i+m:]) { - // Easy case - v does not overlap either the c or d regions. - // (It might be in some of a or b, or elsewhere entirely.) - // The data we copy up doesn't write to v at all, so just do it. - - copy(s[i+m:], s[i:]) - - // Now we have - // s: aaaaaaaabbbbbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // Note the b values are duplicated. 
- - copy(s[i:], v) - - // Now we have - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // That's the result we want. - return s - } - - // The hard case - v overlaps c or d. We can't just shift up - // the data because we'd move or clobber the values we're trying - // to insert. - // So instead, write v on top of d, then rotate. - copy(s[n:], v) - - // Now we have - // s: aaaaaaaabbbbccccccccvvvv - // ^ ^ ^ ^ - // i i+m n n+m - - rotateRight(s[i:], m) - - // Now we have - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // That's the result we want. - return s -} - -// clearSlice sets all elements up to the length of s to the zero value of E. -// We may use the builtin clear func instead, and remove clearSlice, when upgrading -// to Go 1.21+. -func clearSlice[S ~[]E, E any](s S) { - var zero E - for i := range s { - s[i] = zero - } -} - -// Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. -// Delete is O(len(s)-i), so if many items must be deleted, it is better to -// make a single call deleting them all together than to delete one at a time. -// Delete zeroes the elements s[len(s)-(j-i):len(s)]. -func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j:len(s)] // bounds check - - if i == j { - return s - } - - oldlen := len(s) - s = append(s[:i], s[j:]...) - clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC - return s -} - -// DeleteFunc removes any elements from s for which del returns true, -// returning the modified slice. -// DeleteFunc zeroes the elements between the new length and the original length. -func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { - i := IndexFunc(s, del) - if i == -1 { - return s - } - // Don't start copying elements until we find one to delete. - for j := i + 1; j < len(s); j++ { - if v := s[j]; !del(v) { - s[i] = v - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] -} - -// Replace replaces the elements s[i:j] by the given v, and returns the -// modified slice. Replace panics if s[i:j] is not a valid slice of s. -// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. -func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { - _ = s[i:j] // verify that i:j is a valid subslice - - if i == j { - return Insert(s, i, v...) - } - if j == len(s) { - return append(s[:i], v...) - } - - tot := len(s[:i]) + len(v) + len(s[j:]) - if tot > cap(s) { - // Too big to fit, allocate and copy over. - s2 := append(s[:i], make(S, tot-i)...) // See Insert - copy(s2[i:], v) - copy(s2[i+len(v):], s[j:]) - return s2 - } - - r := s[:tot] - - if i+len(v) <= j { - // Easy, as v fits in the deleted portion. - copy(r[i:], v) - if i+len(v) != j { - copy(r[i+len(v):], s[j:]) - } - clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC - return r - } - - // We are expanding (v is bigger than j-i). - // The situation is something like this: - // (example has i=4,j=8,len(s)=16,len(v)=6) - // s: aaaaxxxxbbbbbbbbyy - // ^ ^ ^ ^ - // i j len(s) tot - // a: prefix of s - // x: deleted range - // b: more of s - // y: area to expand into - - if !overlaps(r[i+len(v):], v) { - // Easy, as v is not clobbered by the first copy. - copy(r[i+len(v):], s[j:]) - copy(r[i:], v) - return r - } - - // This is a situation where we don't have a single place to which - // we can copy v. Parts of it need to go to two different places. 
- // We want to copy the prefix of v into y and the suffix into x, then - // rotate |y| spots to the right. - // - // v[2:] v[:2] - // | | - // s: aaaavvvvbbbbbbbbvv - // ^ ^ ^ ^ - // i j len(s) tot - // - // If either of those two destinations don't alias v, then we're good. - y := len(v) - (j - i) // length of y portion - - if !overlaps(r[i:j], v) { - copy(r[i:j], v[y:]) - copy(r[len(s):], v[:y]) - rotateRight(r[i:], y) - return r - } - if !overlaps(r[len(s):], v) { - copy(r[len(s):], v[:y]) - copy(r[i:j], v[y:]) - rotateRight(r[i:], y) - return r - } - - // Now we know that v overlaps both x and y. - // That means that the entirety of b is *inside* v. - // So we don't need to preserve b at all; instead we - // can copy v first, then copy the b part of v out of - // v to the right destination. - k := startIdx(v, s[j:]) - copy(r[i:], v) - copy(r[i+len(v):], r[i+k:]) - return r -} - -// Clone returns a copy of the slice. -// The elements are copied using assignment, so this is a shallow clone. -func Clone[S ~[]E, E any](s S) S { - // Preserve nil in case it matters. - if s == nil { - return nil - } - return append(S([]E{}), s...) -} - -// Compact replaces consecutive runs of equal elements with a single copy. -// This is like the uniq command found on Unix. -// Compact modifies the contents of the slice s and returns the modified slice, -// which may have a smaller length. -// Compact zeroes the elements between the new length and the original length. -func Compact[S ~[]E, E comparable](s S) S { - if len(s) < 2 { - return s - } - i := 1 - for k := 1; k < len(s); k++ { - if s[k] != s[k-1] { - if i != k { - s[i] = s[k] - } - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] -} - -// CompactFunc is like [Compact] but uses an equality function to compare elements. -// For runs of elements that compare equal, CompactFunc keeps the first one. -// CompactFunc zeroes the elements between the new length and the original length. -func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) < 2 { - return s - } - i := 1 - for k := 1; k < len(s); k++ { - if !eq(s[k], s[k-1]) { - if i != k { - s[i] = s[k] - } - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] -} - -// Grow increases the slice's capacity, if necessary, to guarantee space for -// another n elements. After Grow(n), at least n elements can be appended -// to the slice without another allocation. If n is negative or too large to -// allocate the memory, Grow panics. -func Grow[S ~[]E, E any](s S, n int) S { - if n < 0 { - panic("cannot be negative") - } - if n -= cap(s) - len(s); n > 0 { - // TODO(https://go.dev/issue/53888): Make using []E instead of S - // to workaround a compiler bug where the runtime.growslice optimization - // does not take effect. Revert when the compiler is fixed. - s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] - } - return s -} - -// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. 
-func Clip[S ~[]E, E any](s S) S { - return s[:len(s):len(s)] -} - -// Rotation algorithm explanation: -// -// rotate left by 2 -// start with -// 0123456789 -// split up like this -// 01 234567 89 -// swap first 2 and last 2 -// 89 234567 01 -// join first parts -// 89234567 01 -// recursively rotate first left part by 2 -// 23456789 01 -// join at the end -// 2345678901 -// -// rotate left by 8 -// start with -// 0123456789 -// split up like this -// 01 234567 89 -// swap first 2 and last 2 -// 89 234567 01 -// join last parts -// 89 23456701 -// recursively rotate second part left by 6 -// 89 01234567 -// join at the end -// 8901234567 - -// TODO: There are other rotate algorithms. -// This algorithm has the desirable property that it moves each element exactly twice. -// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. -// The follow-cycles algorithm can be 1-write but it is not very cache friendly. - -// rotateLeft rotates b left by n spaces. -// s_final[i] = s_orig[i+r], wrapping around. -func rotateLeft[E any](s []E, r int) { - for r != 0 && r != len(s) { - if r*2 <= len(s) { - swap(s[:r], s[len(s)-r:]) - s = s[:len(s)-r] - } else { - swap(s[:len(s)-r], s[r:]) - s, r = s[len(s)-r:], r*2-len(s) - } - } -} -func rotateRight[E any](s []E, r int) { - rotateLeft(s, len(s)-r) -} - -// swap swaps the contents of x and y. x and y must be equal length and disjoint. -func swap[E any](x, y []E) { - for i := 0; i < len(x); i++ { - x[i], y[i] = y[i], x[i] - } -} - -// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. -func overlaps[E any](a, b []E) bool { - if len(a) == 0 || len(b) == 0 { - return false - } - elemSize := unsafe.Sizeof(a[0]) - if elemSize == 0 { - return false - } - // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. - // Also see crypto/internal/alias/alias.go:AnyOverlap - return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && - uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) -} - -// startIdx returns the index in haystack where the needle starts. -// prerequisite: the needle must be aliased entirely inside the haystack. -func startIdx[E any](haystack, needle []E) int { - p := &needle[0] - for i := range haystack { - if p == &haystack[i] { - return i - } - } - // TODO: what if the overlap is by a non-integral number of Es? - panic("needle not found") -} - -// Reverse reverses the elements of the slice in place. -func Reverse[S ~[]E, E any](s S) { - for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { - s[i], s[j] = s[j], s[i] - } -} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go deleted file mode 100644 index f58bbc7b..00000000 --- a/vendor/golang.org/x/exp/slices/sort.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp - -package slices - -import ( - "math/bits" - - "golang.org/x/exp/constraints" -) - -// Sort sorts a slice of any ordered type in ascending order. -// When sorting floating-point numbers, NaNs are ordered before other values. -func Sort[S ~[]E, E constraints.Ordered](x S) { - n := len(x) - pdqsortOrdered(x, 0, n, bits.Len(uint(n))) -} - -// SortFunc sorts the slice x in ascending order as determined by the cmp -// function. 
This sort is not guaranteed to be stable. -// cmp(a, b) should return a negative number when a < b, a positive number when -// a > b and zero when a == b or when a is not comparable to b in the sense -// of the formal definition of Strict Weak Ordering. -// -// SortFunc requires that cmp is a strict weak ordering. -// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. -// To indicate 'uncomparable', return 0 from the function. -func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { - n := len(x) - pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) -} - -// SortStableFunc sorts the slice x while keeping the original order of equal -// elements, using cmp to compare elements in the same way as [SortFunc]. -func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { - stableCmpFunc(x, len(x), cmp) -} - -// IsSorted reports whether x is sorted in ascending order. -func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { - for i := len(x) - 1; i > 0; i-- { - if cmpLess(x[i], x[i-1]) { - return false - } - } - return true -} - -// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the -// comparison function as defined by [SortFunc]. -func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { - for i := len(x) - 1; i > 0; i-- { - if cmp(x[i], x[i-1]) < 0 { - return false - } - } - return true -} - -// Min returns the minimal value in x. It panics if x is empty. -// For floating-point numbers, Min propagates NaNs (any NaN value in x -// forces the output to be NaN). -func Min[S ~[]E, E constraints.Ordered](x S) E { - if len(x) < 1 { - panic("slices.Min: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - m = min(m, x[i]) - } - return m -} - -// MinFunc returns the minimal value in x, using cmp to compare elements. -// It panics if x is empty. If there is more than one minimal element -// according to the cmp function, MinFunc returns the first one. -func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { - if len(x) < 1 { - panic("slices.MinFunc: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - if cmp(x[i], m) < 0 { - m = x[i] - } - } - return m -} - -// Max returns the maximal value in x. It panics if x is empty. -// For floating-point E, Max propagates NaNs (any NaN value in x -// forces the output to be NaN). -func Max[S ~[]E, E constraints.Ordered](x S) E { - if len(x) < 1 { - panic("slices.Max: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - m = max(m, x[i]) - } - return m -} - -// MaxFunc returns the maximal value in x, using cmp to compare elements. -// It panics if x is empty. If there is more than one maximal element -// according to the cmp function, MaxFunc returns the first one. -func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { - if len(x) < 1 { - panic("slices.MaxFunc: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - if cmp(x[i], m) > 0 { - m = x[i] - } - } - return m -} - -// BinarySearch searches for target in a sorted slice and returns the position -// where target is found, or the position where target would appear in the -// sort order; it also returns a bool saying whether the target is really found -// in the slice. The slice must be sorted in increasing order. -func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { - // Inlining is faster than calling BinarySearchFunc with a lambda. - n := len(x) - // Define x[-1] < target and x[n] >= target. - // Invariant: x[i-1] < target, x[j] >= target. 
- i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if cmpLess(x[h], target) { - i = h + 1 // preserves x[i-1] < target - } else { - j = h // preserves x[j] >= target - } - } - // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. - return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) -} - -// BinarySearchFunc works like [BinarySearch], but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" -// is defined by cmp. cmp should return 0 if the slice element matches -// the target, a negative number if the slice element precedes the target, -// or a positive number if the slice element follows the target. -// cmp must implement the same ordering as the slice, such that if -// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. -func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { - n := len(x) - // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . - // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if cmp(x[h], target) < 0 { - i = h + 1 // preserves cmp(x[i - 1], target) < 0 - } else { - j = h // preserves cmp(x[j], target) >= 0 - } - } - // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. - return i, i < n && cmp(x[i], target) == 0 -} - -type sortedHint int // hint for pdqsort when choosing the pivot - -const ( - unknownHint sortedHint = iota - increasingHint - decreasingHint -) - -// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf -type xorshift uint64 - -func (r *xorshift) Next() uint64 { - *r ^= *r << 13 - *r ^= *r >> 17 - *r ^= *r << 5 - return uint64(*r) -} - -func nextPowerOfTwo(length int) uint { - return 1 << bits.Len(uint(length)) -} - -// isNaN reports whether x is a NaN without requiring the math package. -// This will always return false if T is not floating-point. -func isNaN[T constraints.Ordered](x T) bool { - return x != x -} diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go deleted file mode 100644 index 06f2c7a2..00000000 --- a/vendor/golang.org/x/exp/slices/zsortanyfunc.go +++ /dev/null @@ -1,479 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -// insertionSortCmpFunc sorts data[a:b] using insertion sort. -func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownCmpFunc implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. 
-func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { - child++ - } - if !(cmp(data[first+root], data[first+child]) < 0) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownCmpFunc(data, i, hi, first, cmp) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownCmpFunc(data, lo, i, first, cmp) - } -} - -// pdqsortCmpFunc sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortCmpFunc(data, a, b, cmp) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortCmpFunc(data, a, b, cmp) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsCmpFunc(data, a, b, cmp) - limit-- - } - - pivot, hint := choosePivotCmpFunc(data, a, b, cmp) - if hint == decreasingHint { - reverseRangeCmpFunc(data, a, b, cmp) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortCmpFunc(data, a, b, cmp) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { - mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) - a = mid - continue - } - - mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortCmpFunc(data, a, mid, limit, cmp) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortCmpFunc(data, mid+1, b, limit, cmp) - b = mid - } - } -} - -// partitionCmpFunc does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]

<p and data[j]>=p for i<newpivot and j>newpivot. -// On return, data[newpivot] = p -func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && (cmp(data[i], data[a]) < 0) { - i++ - } - for i <= j && !(cmp(data[j], data[a]) < 0) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && (cmp(data[i], data[a]) < 0) { - i++ - } - for i <= j && !(cmp(data[j], data[a]) < 0) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !(cmp(data[a], data[i]) < 0) { - i++ - } - for i <= j && (cmp(data[a], data[j]) < 0) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !(cmp(data[i], data[i-1]) < 0) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !(cmp(data[j], data[j-1]) < 0) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !(cmp(data[j], data[j-1]) < 0) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotCmpFunc chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentCmpFunc(data, i, &swaps, cmp) - j = medianAdjacentCmpFunc(data, j, &swaps, cmp) - k = medianAdjacentCmpFunc(data, k, &swaps, cmp) - } - // Find the median among i, j, k and stores it into j. - j = medianCmpFunc(data, i, j, k, &swaps, cmp) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { - if cmp(data[b], data[a]) < 0 { - *swaps++ - return b, a - } - return a, b -} - -// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { - a, b = order2CmpFunc(data, a, b, swaps, cmp) - b, c = order2CmpFunc(data, b, c, swaps, cmp) - a, b = order2CmpFunc(data, a, b, swaps, cmp) - return b -} - -// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { - return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) -} - -func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortCmpFunc(data, a, b, cmp) - a = b - b += blockSize - } - insertionSortCmpFunc(data, a, n, cmp) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeCmpFunc(data, a, a+blockSize, b, cmp) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeCmpFunc(data, a, m, n, cmp) - } - blockSize *= 2 - } -} - -// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. -// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. 
-func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. - i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if cmp(data[h], data[a]) < 0 { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !(cmp(data[m], data[h]) < 0) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !(cmp(data[p-c], data[c]) < 0) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateCmpFunc(data, start, m, end, cmp) - } - if a < start && start < mid { - symMergeCmpFunc(data, a, start, mid, cmp) - } - if mid < end && end < b { - symMergeCmpFunc(data, mid, end, b, cmp) - } -} - -// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeCmpFunc(data, m-i, m, j, cmp) - i -= j - } else { - swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) - j -= i - } - } - // i == j - swapRangeCmpFunc(data, m-i, m, i, cmp) -} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go deleted file mode 100644 index 99b47c39..00000000 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ /dev/null @@ -1,481 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import "golang.org/x/exp/constraints" - -// insertionSortOrdered sorts data[a:b] using insertion sort. -func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownOrdered implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. 
-func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { - child++ - } - if !cmpLess(data[first+root], data[first+child]) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownOrdered(data, i, hi, first) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownOrdered(data, lo, i, first) - } -} - -// pdqsortOrdered sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortOrdered(data, a, b) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortOrdered(data, a, b) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsOrdered(data, a, b) - limit-- - } - - pivot, hint := choosePivotOrdered(data, a, b) - if hint == decreasingHint { - reverseRangeOrdered(data, a, b) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortOrdered(data, a, b) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !cmpLess(data[a-1], data[pivot]) { - mid := partitionEqualOrdered(data, a, b, pivot) - a = mid - continue - } - - mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortOrdered(data, a, mid, limit) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortOrdered(data, mid+1, b, limit) - b = mid - } - } -} - -// partitionOrdered does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]

<p and data[j]>=p for i<newpivot and j>newpivot. -// On return, data[newpivot] = p -func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && cmpLess(data[i], data[a]) { - i++ - } - for i <= j && !cmpLess(data[j], data[a]) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && cmpLess(data[i], data[a]) { - i++ - } - for i <= j && !cmpLess(data[j], data[a]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !cmpLess(data[a], data[i]) { - i++ - } - for i <= j && cmpLess(data[a], data[j]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !cmpLess(data[i], data[i-1]) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !cmpLess(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !cmpLess(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsOrdered scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotOrdered chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method.
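To make the pivot tiers above concrete, here is a small self-contained sketch of the median-of-three step for plain ints, mirroring the medianOrdered/order2Ordered helpers below (names and data are illustrative, not part of the vendored code):

package main

import "fmt"

// median3 returns the index of the median of data[a], data[b], data[c],
// using the same sort-three-indices trick as medianOrdered below.
func median3(data []int, a, b, c int) int {
	order2 := func(i, j int) (int, int) {
		if data[j] < data[i] { // plays the role of cmpLess
			return j, i
		}
		return i, j
	}
	a, b = order2(a, b)
	b, c = order2(b, c)
	a, b = order2(a, b)
	return b
}

func main() {
	data := []int{9, 1, 5}
	fmt.Println(median3(data, 0, 1, 2)) // 2, since data[2] == 5 is the median
}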
-func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentOrdered(data, i, &swaps) - j = medianAdjacentOrdered(data, j, &swaps) - k = medianAdjacentOrdered(data, k, &swaps) - } - // Find the median among i, j, k and stores it into j. - j = medianOrdered(data, i, j, k, &swaps) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if cmpLess(data[b], data[a]) { - *swaps++ - return b, a - } - return a, b -} - -// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { - a, b = order2Ordered(data, a, b, swaps) - b, c = order2Ordered(data, b, c, swaps) - a, b = order2Ordered(data, a, b, swaps) - return b -} - -// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { - return medianOrdered(data, a-1, a, a+1, swaps) -} - -func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableOrdered[E constraints.Ordered](data []E, n int) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortOrdered(data, a, b) - a = b - b += blockSize - } - insertionSortOrdered(data, a, n) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeOrdered(data, a, a+blockSize, b) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeOrdered(data, a, m, n) - } - blockSize *= 2 - } -} - -// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. -// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. 
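These generated variants are deleted along with the rest of the vendored golang.org/x/exp/slices package in this bump; since Go 1.21 the standard library's slices package exposes the same comparator-based API. A minimal usage sketch against the standard library (data and comparator are illustrative):

package main

import (
	"fmt"
	"slices"
)

func main() {
	words := []string{"banana", "fig", "apple", "date"}

	// SortFunc takes the same cmp(a, b) int comparator shape these
	// generated pdqsort variants were produced for.
	slices.SortFunc(words, func(a, b string) int { return len(a) - len(b) })

	// SortStableFunc is backed by the same insertion sort + SymMerge
	// strategy as stableOrdered/symMergeOrdered here.
	slices.SortStableFunc(words, func(a, b string) int { return len(a) - len(b) })

	fmt.Println(words) // [fig date apple banana]
}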
-func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. - i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if cmpLess(data[h], data[a]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !cmpLess(data[m], data[h]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !cmpLess(data[p-c], data[c]) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateOrdered(data, start, m, end) - } - if a < start && start < mid { - symMergeOrdered(data, a, start, mid) - } - if mid < end && end < b { - symMergeOrdered(data, mid, end, b) - } -} - -// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeOrdered(data, m-i, m, j) - i -= j - } else { - swapRangeOrdered(data, m-i, m+j-i, i) - j -= i - } - } - // i == j - swapRangeOrdered(data, m-i, m, i) -} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go index 3a7e5ab1..885c4c59 100644 --- a/vendor/golang.org/x/net/html/doc.go +++ b/vendor/golang.org/x/net/html/doc.go @@ -78,16 +78,11 @@ example, to process each anchor node in depth-first order: if err != nil { // ... } - var f func(*html.Node) - f = func(n *html.Node) { + for n := range doc.Descendants() { if n.Type == html.ElementNode && n.Data == "a" { // Do something with n... } - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } } - f(doc) The relevant specifications include: https://html.spec.whatwg.org/multipage/syntax.html and diff --git a/vendor/golang.org/x/net/html/iter.go b/vendor/golang.org/x/net/html/iter.go new file mode 100644 index 00000000..54be8fd3 --- /dev/null +++ b/vendor/golang.org/x/net/html/iter.go @@ -0,0 +1,56 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package html + +import "iter" + +// Ancestors returns an iterator over the ancestors of n, starting with n.Parent. 
+// +// Mutating a Node or its parents while iterating may have unexpected results. +func (n *Node) Ancestors() iter.Seq[*Node] { + _ = n.Parent // eager nil check + + return func(yield func(*Node) bool) { + for p := n.Parent; p != nil && yield(p); p = p.Parent { + } + } +} + +// ChildNodes returns an iterator over the immediate children of n, +// starting with n.FirstChild. +// +// Mutating a Node or its children while iterating may have unexpected results. +func (n *Node) ChildNodes() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + for c := n.FirstChild; c != nil && yield(c); c = c.NextSibling { + } + } + +} + +// Descendants returns an iterator over all nodes recursively beneath +// n, excluding n itself. Nodes are visited in depth-first preorder. +// +// Mutating a Node or its descendants while iterating may have unexpected results. +func (n *Node) Descendants() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + n.descendants(yield) + } +} + +func (n *Node) descendants(yield func(*Node) bool) bool { + for c := range n.ChildNodes() { + if !yield(c) || !c.descendants(yield) { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go index 1350eef2..77741a19 100644 --- a/vendor/golang.org/x/net/html/node.go +++ b/vendor/golang.org/x/net/html/node.go @@ -38,6 +38,10 @@ var scopeMarker = Node{Type: scopeMarkerNode} // that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom // is the atom for Data, or zero if Data is not a known tag name. [...] diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go new file mode 100644 [...] + if conf.MaxReadFrameSize < minMaxFrameSize { + conf.MaxReadFrameSize = minMaxFrameSize + } else if conf.MaxReadFrameSize > maxFrameSize { + conf.MaxReadFrameSize = maxFrameSize + } + + if h2.t1 != nil { + fillNetHTTPTransportConfig(&conf, h2.t1) + } + setConfigDefaults(&conf, false) + return conf +} + +func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) { + if *v < minval || *v > maxval { + *v = defval + } +} + +func setConfigDefaults(conf *http2Config, server bool) { + setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams) + setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + if server { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow) + } + if server { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow) + } + setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize) + setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second) +} + +// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header +// to an HTTP/2 MAX_HEADER_LIST_SIZE value. +func adjustHTTP1MaxHeaderSize(n int64) int64 { + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return n + typicalHeaders*perFieldOverhead +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go new file mode 100644 index 00000000..e3784123 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.24 + +package http2 + +import "net/http" + +// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { + fillNetHTTPConfig(conf, srv.HTTP2) +} + +// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { + fillNetHTTPConfig(conf, tr.HTTP2) +} + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go new file mode 100644 index 00000000..060fd6c6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_pre_go124.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.24 + +package http2 + +import "net/http" + +// Pre-Go 1.24 fallback. +// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. + +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} + +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 003e649f..7688c356 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -19,8 +19,9 @@ import ( "bufio" "context" "crypto/tls" + "errors" "fmt" - "io" + "net" "net/http" "os" "sort" @@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. 
type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + group synctestGroupInterface // immutable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + group: group, + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. +func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + var now time.Time + if group == nil { + now = time.Now() + } else { + now = group.Now() + } + conn.SetWriteDeadline(now.Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. + conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 6c349f3e..832414b4 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -52,10 +53,14 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -127,6 +132,22 @@ type Server struct { // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. 
+ ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer { return timeTimer{time.AfterFunc(d, f)} } -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. - return maxQueuedControlFrames -} - type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -336,7 +306,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) { if testHookOnConn != nil { testHookOnConn() } @@ -353,12 +323,31 @@ func ConfigureServer(s *http.Server, conf *Server) error { ctx = bc.BaseContext() } conf.ServeConn(c, &ServeConnOpts{ - Context: ctx, - Handler: h, - BaseConfig: hs, + Context: ctx, + Handler: h, + BaseConfig: hs, + SawClientPreface: sawClientPreface, }) } - s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler(hs, c, h, false) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + // + // A connection passed in this method has already had the HTTP/2 preface read from it. 
+ s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + if lg := hs.ErrorLog; lg != nil { + lg.Print(err) + } else { + log.Print(err) + } + go c.Close() + return + } + protoHandler(hs, nc, h, true) + } return nil } @@ -440,13 +429,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -456,9 +447,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -491,15 +485,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -532,7 +526,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." 
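The surrounding hunks replace the Server's per-field getter methods with a single http2Config computed by configFromServer and threaded through serveConn. For reference, a minimal sketch of opting into the new server-side keepalive fields this update adds (durations, address, and cert paths are illustrative):

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	// ReadIdleTimeout, PingTimeout, and WriteByteTimeout are the Server
	// fields introduced by this diff.
	err := http2.ConfigureServer(srv, &http2.Server{
		ReadIdleTimeout:  30 * time.Second, // send a PING after 30s without frames
		PingTimeout:      15 * time.Second, // close the conn if the PING ACK is late
		WriteByteTimeout: 10 * time.Second, // close the conn when writes stall
	})
	if err != nil {
		log.Fatal(err)
	}
	// cert.pem/key.pem are placeholder paths.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}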
@@ -569,7 +563,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -609,6 +603,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -628,6 +623,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -638,9 +634,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode shutdownTimer timer // nil until used idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -655,11 +656,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -923,7 +920,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -937,18 +934,18 @@ func (sc *serverConn) serve() { sc.writeFrame(FrameWriteRequest{ write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxFrameSize, conf.MaxReadFrameSize}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, }, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -968,11 +965,18 @@ func (sc *serverConn) serve() { defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -986,6 +990,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1017,6 +1022,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -1039,7 +1046,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1055,12 +1062,39 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.vlogf("timeout waiting for PING response") + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. 
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1068,6 +1102,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1320,6 +1355,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1594,6 +1633,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -2160,7 +2204,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -2855,6 +2899,11 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { return nil } +func (w *responseWriter) EnableFullDuplex() error { + // We always support full duplex responses, so this is a no-op. + return nil +} + func (w *responseWriter) Flush() { w.FlushError() } @@ -3301,7 +3350,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 61f511f9..f5968f44 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" "sort" "strconv" "strings" @@ -203,6 +202,20 @@ func (t *Transport) markNewGoroutine() { } } +func (t *Transport) now() time.Time { + if t != nil && t.transportTestHooks != nil { + return t.transportTestHooks.group.Now() + } + return time.Now() +} + +func (t *Transport) timeSince(when time.Time) time.Duration { + if t != nil && t.transportTestHooks != nil { + return t.now().Sub(when) + } + return time.Since(when) +} + // newTimer creates a new time.Timer, or a synthetic timer in tests. 
func (t *Transport) newTimer(d time.Duration) timer { if t.transportTestHooks != nil { @@ -227,40 +240,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co } func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { return 10 << 20 } - if t.MaxHeaderListSize == 0xffffffff { + if n >= 0xffffffff { return 0 } - return t.MaxHeaderListSize -} - -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer - } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize - } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize - } - return t.MaxReadFrameSize + return uint32(n) } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second - } - return t.PingTimeout - -} - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. // @@ -296,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) + upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper { + addr := authorityAddr(scheme, authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} @@ -308,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { // was unknown) go c.Close() } + if scheme == "http" { + return (*unencryptedTransport)(t2) + } return t2 } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, + if t1.TLSNextProto == nil { + t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + } + t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper { + return upgradeFn("https", authority, c) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + go c.Close() + return erringRoundTripper{err} } - } else { - m["h2"] = upgradeFn + return upgradeFn("http", authority, nc) } return t2, nil } +// unencryptedTransport is a Transport with a RoundTrip method that +// always permits http:// URLs. 
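The unencryptedTransport type introduced below rides on the same allowHTTP plumbing added to RoundTripOpt. For comparison, a minimal sketch of the pre-existing way to speak cleartext HTTP/2 ("h2c") through this package, using Transport.AllowHTTP with a plaintext dialer (the endpoint is illustrative, and the server must speak h2c):

package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http2.Transport{
		AllowHTTP: true, // permit http:// URLs, as RoundTripOpt.allowHTTP does internally
		// "Dial TLS" with a plaintext connection instead.
		DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, network, addr)
		},
	}
	client := &http.Client{Transport: tr}
	resp, err := client.Get("http://127.0.0.1:8080/") // illustrative endpoint
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // "HTTP/2.0"
}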
+type unencryptedTransport Transport + +func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true}) +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -339,7 +357,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic + atomicReused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request getConnCalled bool // used by clientConnPool @@ -370,11 +388,22 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration + + // pendingResets is the number of RST_STREAM frames we have sent to the peer, + // without confirming that the peer has received them. When we send a RST_STREAM, + // we bundle it with a PING frame, unless a PING is already in flight. We count + // the reset stream against the connection's concurrency limit until we get + // a PING response. This limits the number of requests we'll try to send to a + // completely unresponsive connection. + pendingResets int // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -432,12 +461,12 @@ type clientStream struct { sentHeaders bool // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - num1xx uint8 // number of 1xx responses seen - readClosed bool // peer sent an END_STREAM flag - readAborted bool // read loop reset the stream + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream + totalHeaderSize int64 // total size of 1xx headers seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -499,6 +528,7 @@ func (cs *clientStream) closeReqBodyLocked() { } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -508,22 +538,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. 
- continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -554,6 +571,8 @@ type RoundTripOpt struct { // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. OnlyCachedConn bool + + allowHTTP bool // allow http:// URLs } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { @@ -586,7 +605,14 @@ func authorityAddr(scheme string, authority string) (addr string) { // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + switch req.URL.Scheme { + case "https": + // Always okay. + case "http": + if !t.AllowHTTP && !opt.allowHTTP { + return nil, errors.New("http2: unencrypted HTTP/2 not enabled") + } + default: return nil, errors.New("http2: unsupported scheme") } @@ -597,7 +623,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1) traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { @@ -622,6 +648,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } } } + if err == errClientConnNotEstablished { + // This ClientConn was created recently, + // this is the first request to use it, + // and the connection is closed and not usable. + // + // In this state, cc.idleTimer will remove the conn from the pool + // when it fires. Stop the timer and remove it here so future requests + // won't try to use this connection. + // + // If the timer has already fired and we're racing it, the redundant + // call to MarkDead is harmless. 
+ if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + t.connPool().MarkDead(cc) + } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err @@ -640,9 +682,10 @@ func (t *Transport) CloseIdleConnections() { } var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnNotEstablished = errors.New("http2: client conn could not be established") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -758,44 +801,37 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), - } + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + lastActive: t.now(), + } + var group synctestGroupInterface if t.transportTestHooks != nil { t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -807,24 +843,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. 
cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize if cs, ok := c.(connectionStater); ok { @@ -834,11 +869,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -848,8 +881,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -867,7 +900,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) @@ -995,7 +1028,7 @@ func (cc *ClientConn) State() ClientConnState { return ClientConnState{ Closed: cc.closed, Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, - StreamsActive: len(cc.streams), + StreamsActive: len(cc.streams) + cc.pendingResets, StreamsReserved: cc.streamsReserved, StreamsPending: cc.pendingRequests, LastIdle: cc.lastIdle, @@ -1027,16 +1060,38 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // writing it. maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) + // We can take a new request if the total of + // - active streams; + // - reservation slots for new streams; and + // - streams for which we have sent a RST_STREAM and a PING, + // but received no subsequent frame + // is less than the concurrency limit. 
+ maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() + + // If this connection has never been used for a request and is closed, + // then let it take a request (which will fail). + // + // This avoids a situation where an error early in a connection's lifetime + // goes unreported. + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { + st.canTakeNewRequest = true + } + return } +// currentRequestCountLocked reports the number of concurrency slots currently in use, +// including active streams, reserved slots, and reset streams waiting for acknowledgement. +func (cc *ClientConn) currentRequestCountLocked() int { + return len(cc.streams) + cc.streamsReserved + cc.pendingResets +} + func (cc *ClientConn) canTakeNewRequestLocked() bool { st := cc.idleStateLocked() return st.canTakeNewRequest @@ -1049,7 +1104,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. - return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1613,6 +1668,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { cs.reqBodyClosed = make(chan struct{}) } bodyClosed := cs.reqBodyClosed + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1637,16 +1693,40 @@ func (cs *clientStream) cleanupWriteRequest(err error) { if cs.sentHeaders { if se, ok := err.(StreamError); ok { if se.Cause != errFromPeer { - cc.writeStreamReset(cs.ID, se.Code, err) + cc.writeStreamReset(cs.ID, se.Code, false, err) } } else { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + // We're cancelling an in-flight request. + // + // This could be due to the server becoming unresponsive. + // To avoid sending too many requests on a dead connection, + // we let the request continue to consume a concurrency slot + // until we can confirm the server is still responding. + // We do this by sending a PING frame along with the RST_STREAM + // (unless a ping is already in flight). + // + // For simplicity, we don't bother tracking the PING payload: + // We reset cc.pendingResets any time we receive a PING ACK. + // + // We skip this if the conn is going to be closed on idle, + // because it's short lived and will probably be closed before + // we get the ping response. + ping := false + if !closeOnIdle { + cc.mu.Lock() + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ + cc.mu.Unlock() + } + cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) } } cs.bufPipe.CloseWithError(err) // no-op if already closed } else { if cs.sentHeaders && !cs.sentEndStream { - cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil) } cs.bufPipe.CloseWithError(errRequestCanceled) } @@ -1668,12 +1748,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // Must hold cc.mu. 
func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { - cc.lastActive = time.Now() + if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 { + // This is the very first request sent to this connection. + // Return a fatal error which aborts the retry loop. + return errClientConnNotEstablished + } + cc.lastActive = cc.t.now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) { return nil } cc.pendingRequests++ @@ -2199,7 +2284,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2215,10 +2300,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = time.Now() + cc.lastActive = cc.t.now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() + cc.lastIdle = cc.t.now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2278,7 +2363,6 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) defer cc.closeConn() defer close(cc.readerDone) @@ -2302,6 +2386,24 @@ func (rl *clientConnReadLoop) cleanup() { } cc.closed = true + // If the connection has never been used, and has been open for only a short time, + // leave it in the connection pool for a little while. + // + // This avoids a situation where new connections are constantly created, + // added to the pool, fail, and are removed from the pool, without any error + // being surfaced to the user. + const unusedWaitTime = 5 * time.Second + idleTime := cc.t.now().Sub(cc.lastActive) + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.t.connPool().MarkDead(cc) + }) + } else { + cc.mu.Unlock() // avoid any deadlocks in MarkDead + cc.t.connPool().MarkDead(cc) + cc.mu.Lock() + } + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2345,7 +2447,7 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout + readIdleTimeout := cc.readIdleTimeout var t timer if readIdleTimeout != 0 { t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) @@ -2529,15 +2631,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra if f.StreamEnded() { return nil, errors.New("1xx informational response with END_STREAM flag") } - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } if fn := cs.get1xxTraceFunc(); fn != nil { + // If the 1xx response is being delivered to the user, + // then they're responsible for limiting the number + // of responses. 
if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { return nil, err } + } else { + // If the user didn't examine the 1xx response, then we + // limit the size of all 1xx headers. + // + // This differs a bit from the HTTP/1 implementation, which + // limits the size of all 1xx headers plus the final response. + // Use the larger limit of MaxHeaderListSize and + // net/http.Transport.MaxResponseHeaderBytes. + limit := int64(cs.cc.t.maxHeaderListSize()) + if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit { + limit = t1.MaxResponseHeaderBytes + } + for _, h := range f.Fields { + cs.totalHeaderSize += int64(h.Size()) + } + if cs.totalHeaderSize > limit { + if VerboseLogs { + log.Printf("http2: 1xx informational responses too large") + } + return nil, errors.New("header list too large") + } } if statusCode == 100 { traceGot100Continue(cs.trace) @@ -3081,6 +3202,11 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { close(c) delete(cc.pings, f.Data) } + if cc.pendingResets > 0 { + // See clientStream.cleanupWriteRequest. + cc.pendingResets = 0 + cc.cond.Broadcast() + } return nil } cc := rl.cc @@ -3103,13 +3229,20 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { return ConnectionError(ErrCodeProtocol) } -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { +// writeStreamReset sends a RST_STREAM frame. +// When ping is true, it also sends a PING frame with a random payload. +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + if ping { + var payload [8]byte + rand.Read(payload[:]) + cc.fr.WritePing(false, payload) + } cc.bw.Flush() cc.wmu.Unlock() } @@ -3263,7 +3396,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Since(cc.lastActive) + ci.IdleTime = cc.t.timeSince(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/net/http2/unencrypted.go b/vendor/golang.org/x/net/http2/unencrypted.go new file mode 100644 index 00000000..b2de2116 --- /dev/null +++ b/vendor/golang.org/x/net/http2/unencrypted.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "crypto/tls" + "errors" + "net" +) + +const nextProtoUnencryptedHTTP2 = "unencrypted_http2" + +// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn. +// +// TLSNextProto functions accept a *tls.Conn. +// +// When passing an unencrypted HTTP/2 connection to a TLSNextProto function, +// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection. +// To be extra careful about mistakes (accidentally dropping TLS encryption in a place +// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method +// that returns the actual connection we want to use. 
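//
// A minimal sketch of a conn satisfying that contract (the type name is
// illustrative, not part of this package):
//
//	type unencryptedConn struct {
//		net.Conn
//	}
//
//	func (c unencryptedConn) UnencryptedNetConn() net.Conn { return c.Conn }
//
// Wrapping a raw net.Conn this way before handing it to a TLSNextProto
// function lets unencryptedNetConnFromTLSConn below recover the original conn.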
+func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) { + conner, ok := tc.NetConn().(interface { + UnencryptedNetConn() net.Conn + }) + if !ok { + return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff") + } + return conner.UnencryptedNetConn(), nil +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 33f61398..6ff6bee7 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index 923a5780..ac76165c 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -8,7 +8,7 @@ // This package currently lacks some features found in an alternative // and more actively maintained WebSocket package: // -// https://pkg.go.dev/nhooyr.io/websocket +// https://pkg.go.dev/github.com/coder/websocket package websocket // import "golang.org/x/net/websocket" import ( diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 781770c2..48dbb9d8 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. + +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb3321..109997d7 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. 
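//
// For the ExpiresIn field above, a hedged sketch of that caller-side
// responsibility, run as soon as the token is received:
//
//	if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
//		tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
//	}
//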
raw interface{} diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 7d3c060e..6e08a76a 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680ea..7ca4fa12 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { return &value, err } +// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC +// association for the network device specified by ifname. +func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) + return &value, err +} + +// IoctlGetHwTstamp retrieves the hardware timestamping configuration +// for the network device specified by ifname. +func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := HwTstampConfig{} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd) + return &value, err +} + +// IoctlSetHwTstamp updates the hardware timestamping configuration for +// the network device specified by ifname. +func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error { + ifr, err := NewIfreq(ifname) + if err != nil { + return err + } + ifrd := ifr.withData(unsafe.Pointer(cfg)) + return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd) +} + +// FdToClockID derives the clock ID from the file descriptor number +// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is +// suitable for system calls like ClockGettime. +func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) } + +// IoctlPtpClockGetcaps returns the description of a given PTP device. +func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) { + var value PtpClockCaps + err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetPrecise returns a description of the clock +// offset compared to the system clock. +func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) { + var value PtpSysOffsetPrecise + err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetExtended returns an extended description of the +// clock offset compared to the system clock. The samples parameter +// specifies the desired number of measurements. 
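//
// The helpers above compose with ClockGettime to read a PTP hardware clock
// directly. A hedged sketch (the device path is illustrative):
//
//	fd, err := unix.Open("/dev/ptp0", unix.O_RDWR, 0)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer unix.Close(fd)
//
//	var ts unix.Timespec
//	if err := unix.ClockGettime(unix.FdToClockID(fd), &ts); err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("PHC time: %d.%09d\n", ts.Sec, ts.Nsec)
//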
+func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) {
+	value := PtpSysOffsetExtended{Samples: uint32(samples)}
+	err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpPinGetfunc returns the configuration of the specified
+// I/O pin on the given PTP device.
+func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) {
+	value := PtpPinDesc{Index: uint32(index)}
+	err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpPinSetfunc updates the configuration of the specified PTP
+// I/O pin.
+func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error {
+	return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd))
+}
+
+// IoctlPtpPeroutRequest configures the periodic output mode of the
+// PTP I/O pins.
+func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error {
+	return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r))
+}
+
+// IoctlPtpExttsRequest configures the external timestamping mode
+// of the PTP I/O pins.
+func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error {
+	return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r))
+}
+
 // IoctlGetWatchdogInfo fetches information about a watchdog device from the
 // Linux watchdog API. For more information, see:
 // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index d07dd09e..6ab02b6c 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -158,6 +158,16 @@ includes_Linux='
 #endif
 #define _GNU_SOURCE
 
+// See the description in unix/linux/types.go
+#if defined(__ARM_EABI__) || \
+    (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \
+    (defined(__powerpc__) && (!defined(__powerpc64__)))
+# ifdef _TIME_BITS
+#  undef _TIME_BITS
+# endif
+# define _TIME_BITS 32
+#endif
+
 // is broken on powerpc64, as it fails to include definitions of
 // these structures. We just include them copied from .
#if defined(__powerpc__) @@ -256,6 +266,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -527,6 +538,7 @@ ccflags="$@" $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^PTP_/ || $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || @@ -552,6 +564,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -655,7 +668,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -665,7 +678,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef..6f15ba1e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. for err == ERESTART { r, err = wait4(Pid_t(pid), &status, options, rusage) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 2d15200a..099867de 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -566,6 +566,43 @@ func PthreadFchdir(fd int) (err error) { return pthread_fchdir_np(fd) } +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. 
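//
// A hedged usage sketch (fd is assumed to be a freshly created TCP socket;
// the destination address and payload are illustrative; the
// CONNECT_DATA_IDEMPOTENT flag requests TCP Fast Open-style idempotent data
// on connect):
//
//	payload := []byte("GET / HTTP/1.1\r\n\r\n")
//	sa := &unix.SockaddrInet4{Port: 80, Addr: [4]byte{192, 0, 2, 1}}
//	iov := []unix.Iovec{{Base: &payload[0], Len: uint64(len(payload))}}
//	var connid unix.SaeConnID
//	n, err := unix.Connectx(fd, 0, nil, sa,
//		unix.SAE_ASSOCID_ANY, unix.CONNECT_DATA_IDEMPOTENT, iov, &connid)
//	// On success, n bytes of payload were enqueued along with the SYN.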
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) {
+	endpoints := SaEndpoints{
+		Srcif: srcIf,
+	}
+
+	if srcAddr != nil {
+		addrp, addrlen, err := srcAddr.sockaddr()
+		if err != nil {
+			return 0, err
+		}
+		endpoints.Srcaddr = (*RawSockaddr)(addrp)
+		endpoints.Srcaddrlen = uint32(addrlen)
+	}
+
+	if dstAddr != nil {
+		addrp, addrlen, err := dstAddr.sockaddr()
+		if err != nil {
+			return 0, err
+		}
+		endpoints.Dstaddr = (*RawSockaddr)(addrp)
+		endpoints.Dstaddrlen = uint32(addrlen)
+	}
+
+	err = connectx(fd, &endpoints, associd, flags, iov, &n, connid)
+	return
+}
+
+//sys	connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
 //sys	sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
 
 //sys	shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go
index ba46651f..a6a2d2fc 100644
--- a/vendor/golang.org/x/sys/unix/syscall_hurd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go
@@ -11,6 +11,7 @@ package unix
 int ioctl(int, unsigned long int, uintptr_t);
 */
 import "C"
+import "unsafe"
 
 func ioctl(fd int, req uint, arg uintptr) (err error) {
 	r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg))
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 3f1d3d4c..230a9454 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) {
 	return &value, err
 }
 
+// GetsockoptTCPCCVegasInfo returns algorithm-specific congestion control information for a socket using the "vegas"
+// algorithm.
+//
+// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+//	algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) {
+	var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+	vallen := _Socklen(SizeofTCPCCInfo)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+	out := (*TCPVegasInfo)(unsafe.Pointer(&value[0]))
+	return out, err
+}
+
+// GetsockoptTCPCCDCTCPInfo returns algorithm-specific congestion control information for a socket using the "dctcp"
+// algorithm.
+//
+// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+//	algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) {
+	var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+	vallen := _Socklen(SizeofTCPCCInfo)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+	out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0]))
+	return out, err
+}
+
+// GetsockoptTCPCCBBRInfo returns algorithm-specific congestion control information for a socket using the "bbr"
+// algorithm.
+//
+// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+//	algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) {
+	var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+	vallen := _Socklen(SizeofTCPCCInfo)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+	out := (*TCPBBRInfo)(unsafe.Pointer(&value[0]))
+	return out, err
+}
+
 // GetsockoptString returns the string value of the socket option opt for the
 // socket associated with fd at the given socket level.
 func GetsockoptString(fd, level, opt int) (string, error) {
@@ -1818,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 //sys	ClockAdjtime(clockid int32, buf *Timex) (state int, err error)
 //sys	ClockGetres(clockid int32, res *Timespec) (err error)
 //sys	ClockGettime(clockid int32, time *Timespec) (err error)
+//sys	ClockSettime(clockid int32, time *Timespec) (err error)
 //sys	ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
 //sys	Close(fd int) (err error)
 //sys	CloseRange(first uint, last uint, flags uint) (err error)
@@ -1959,7 +2002,26 @@ func Getpgrp() (pid int) {
 //sysnb	Getpid() (pid int)
 //sysnb	Getppid() (ppid int)
 //sys	Getpriority(which int, who int) (prio int, err error)
-//sys	Getrandom(buf []byte, flags int) (n int, err error)
+
+func Getrandom(buf []byte, flags int) (n int, err error) {
+	vdsoRet, supported := vgetrandom(buf, uint32(flags))
+	if supported {
+		if vdsoRet < 0 {
+			return 0, errnoErr(syscall.Errno(-vdsoRet))
+		}
+		return vdsoRet, nil
+	}
+	var p *byte
+	if len(buf) > 0 {
+		p = &buf[0]
+	}
+	r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags))
+	if e != 0 {
+		return 0, errnoErr(e)
+	}
+	return int(r), nil
+}
+
 //sysnb	Getrusage(who int, rusage *Rusage) (err error)
 //sysnb	Getsid(pid int) (sid int, err error)
 //sysnb	Gettid() (tid int)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index cf2ee6c7..745e5c7e 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
 	}
 	return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
 }
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
index 3d0e9845..dd2262a4 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
@@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
 	}
 	return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
 }
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
index 6f5a2889..8cf3670b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
@@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error
 	}
 	return riscvHWProbe(pairs, setSize, set, flags)
 }
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
index 312ae6ac..7bf5c04b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
@@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) {
 	return mapper.Munmap(b)
 }
 
+func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) {
+	xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset)
+	return unsafe.Pointer(xaddr), err
+}
+
+func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) {
+	return mapper.munmap(uintptr(addr), length)
+}
+
 //sys	Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A
 //sysnb	Getgid() (gid int)
 //sysnb	Getpid() (pid int)
@@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) {
 // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/
 func isSpecialPath(path []byte) (v bool) {
 	var special = [4][8]byte{
-		[8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
-		[8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
-		[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
-		[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
+		{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
+		{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
+		{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
+		{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
 
 	var i, j int
 	for i = 0; i < len(special); i++ {
@@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) {
 //sys	Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT
 //sys	Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT
 //sys	Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT
+
+func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) {
+	runtime.EnterSyscall()
+	r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg)
+	runtime.ExitSyscall()
+	val = int(r0)
+	if int64(r0) == -1 {
+		err = errnoErr2(e1, e2)
+	}
+	return
+}
+
+func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) {
+	switch op.(type) {
+	case *Flock_t:
+		err = FcntlFlock(fd, cmd, op.(*Flock_t))
+		if err != nil {
+			ret = -1
+		}
+		return
+	case int:
+		return FcntlInt(fd, cmd, op.(int))
+	case *F_cnvrt:
+		return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt))))
+	case unsafe.Pointer:
+		return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer)))
+	default:
+		return -1, EINVAL
+	}
+	return
+}
+
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	if raceenabled {
+		raceReleaseMerge(unsafe.Pointer(&ioSync))
+	}
+	return sendfile(outfd, infd, offset, count)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	// TODO: use LE call instead if the call is implemented
+	originalOffset, err := Seek(infd, 0, SEEK_CUR)
+	if err != nil {
+		return -1, err
+	}
+	// start reading data from in_fd
+	if offset != nil {
+		_, err := Seek(infd, *offset, SEEK_SET)
+		if err != nil {
+			return -1, err
+		}
+	}
+
+	buf := make([]byte, count)
+	readBuf := make([]byte, 0)
+	var n int = 0
+	for i := 0; i < count; i += n {
+		// Assign to n (rather than redeclaring it) so the loop counter sees
+		// the bytes actually read, and cap the read at the bytes remaining.
+		n, err = Read(infd, buf[:count-i])
+		if n == 0 {
+			if err != nil {
+				return -1, err
+			} else { // EOF
+				break
+			}
+		}
+		// Only the first n bytes of buf hold freshly read data.
+		readBuf = append(readBuf, buf[:n]...)
+	}
+
+	n2, err := Write(outfd, readBuf)
+	if err != nil {
+		return -1, err
+	}
+
+	// When sendfile() returns, this variable will be set to the
+	// offset of the byte following the last byte that was read.
+	if offset != nil {
+		*offset = *offset + int64(len(readBuf))
+		// If offset is not NULL, then sendfile() does not modify the file
+		// offset of in_fd
+		_, err := Seek(infd, originalOffset, SEEK_SET)
+		if err != nil {
+			return -1, err
+		}
+	}
+	return n2, nil
+}
diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go
new file mode 100644
index 00000000..07ac8e09
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go
@@ -0,0 +1,13 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && go1.24
+
+package unix
+
+import _ "unsafe"
+
+//go:linkname vgetrandom runtime.vgetrandom
+//go:noescape
+func vgetrandom(p []byte, flags uint32) (ret int, supported bool)
diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go
new file mode 100644
index 00000000..297e97bc
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go
@@ -0,0 +1,11 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux || !go1.24
+
+package unix
+
+func vgetrandom(p []byte, flags uint32) (ret int, supported bool) {
+	return -1, false
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
index 4308ac17..d73c4652 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
@@ -237,6 +237,9 @@ const (
 	CLOCK_UPTIME_RAW_APPROX = 0x9
 	CLONE_NOFOLLOW = 0x1
 	CLONE_NOOWNERCOPY = 0x2
+	CONNECT_DATA_AUTHENTICATED = 0x4
+	CONNECT_DATA_IDEMPOTENT = 0x2
+	CONNECT_RESUME_ON_READ_WRITE = 0x1
 	CR0 = 0x0
 	CR1 = 0x1000
 	CR2 = 0x2000
@@ -1265,6 +1268,10 @@ const (
 	RTV_SSTHRESH = 0x20
 	RUSAGE_CHILDREN = -0x1
 	RUSAGE_SELF = 0x0
+	SAE_ASSOCID_ALL = 0xffffffff
+	SAE_ASSOCID_ANY = 0x0
+	SAE_CONNID_ALL = 0xffffffff
+	SAE_CONNID_ANY = 0x0
 	SCM_CREDS = 0x3
 	SCM_RIGHTS = 0x1
 	SCM_TIMESTAMP = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
index 28ff4ef7..4a55a400 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
@@ -237,6 +237,9 @@ const (
 	CLOCK_UPTIME_RAW_APPROX = 0x9
 	CLONE_NOFOLLOW = 0x1
 	CLONE_NOOWNERCOPY = 0x2
+	CONNECT_DATA_AUTHENTICATED = 0x4
+	CONNECT_DATA_IDEMPOTENT = 0x2
+	CONNECT_RESUME_ON_READ_WRITE = 0x1
 	CR0 = 0x0
 	CR1 = 0x1000
 	CR2 = 0x2000
@@ -1265,6 +1268,10 @@ const (
 	RTV_SSTHRESH = 0x20
 	RUSAGE_CHILDREN = -0x1
 	RUSAGE_SELF = 0x0
+	SAE_ASSOCID_ALL = 0xffffffff
+	SAE_ASSOCID_ANY = 0x0
+	SAE_CONNID_ALL = 0xffffffff
+	SAE_CONNID_ANY = 0x0
 	SCM_CREDS = 0x3
 	SCM_RIGHTS = 0x1
 	SCM_TIMESTAMP = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 01a70b24..ccba391c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -495,6 +495,7 @@ const (
 	BPF_F_TEST_REG_INVARIANTS = 0x80
 	BPF_F_TEST_RND_HI32 = 0x4
 	BPF_F_TEST_RUN_ON_CPU = 0x1
+
BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 @@ -1922,6 +1923,7 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2187,7 +2189,7 @@ const ( NFT_REG_SIZE = 0x10 NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 NFT_TABLE_F_MASK = 0x7 @@ -2356,9 +2358,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2431,6 +2435,7 @@ const ( PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2620,6 +2625,28 @@ const ( PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 PSTOREFS_MAGIC = 0x6165676c + PTP_CLK_MAGIC = '=' + PTP_ENABLE_FEATURE = 0x1 + PTP_EXTTS_EDGES = 0x6 + PTP_EXTTS_EVENT_VALID = 0x1 + PTP_EXTTS_V1_VALID_FLAGS = 0x7 + PTP_EXTTS_VALID_FLAGS = 0x1f + PTP_EXT_OFFSET = 0x10 + PTP_FALLING_EDGE = 0x4 + PTP_MAX_SAMPLES = 0x19 + PTP_PEROUT_DUTY_CYCLE = 0x2 + PTP_PEROUT_ONE_SHOT = 0x1 + PTP_PEROUT_PHASE = 0x4 + PTP_PEROUT_V1_VALID_FLAGS = 0x0 + PTP_PEROUT_VALID_FLAGS = 0x7 + PTP_PIN_GETFUNC = 0xc0603d06 + PTP_PIN_GETFUNC2 = 0xc0603d0f + PTP_RISING_EDGE = 0x2 + PTP_STRICT_FLAGS = 0x8 + PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09 + PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12 + PTP_SYS_OFFSET_PRECISE = 0xc0403d08 + PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11 PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -2933,11 +2960,12 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x3f + RWF_SUPPORTED = 0x7f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3210,6 +3238,7 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 @@ -3226,6 +3255,7 @@ const ( STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 @@ -3624,6 +3654,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 684a5168..0c00cb3f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -232,6 +237,20 @@ const ( 
PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 PTRACE_GET_THREAD_AREA = 0x19 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 61d74b59..dfb36455 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -232,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_ARCH_PRCTL = 0x1e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a28c9e3e..d46dcf78 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETCRUNCHREGS = 0x19 PTRACE_GETFDPIC = 0x1f PTRACE_GETFDPIC_EXEC = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ab5d1fe8..3af3248a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 
0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -235,6 +240,20 @@ const ( PROT_BTI = 0x10 PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index c523090e..292bcf02 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -233,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 01e6ea78..782b7110 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 7aa610b1..84973fd9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 92af771b..6d9cbc3b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index b27ef5e6..5f9fedbc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + 
PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 237a2cef..bb0026ee 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -232,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 4a5c555a..46120db5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -232,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index a02fb49a..5c951634 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 
0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -232,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e26a7c61..11a84d5a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFDPIC = 0x21 PTRACE_GETFDPIC_EXEC = 0x0 PTRACE_GETFDPIC_INTERP = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index c48f7c21..f78c4617 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 PTRACE_GET_LAST_BREAK = 0x5006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index ad4b9aac..aeb777c3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -155,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -234,6 +239,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPAREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETFPREGS64 = 0x19 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab..1ec2b140 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index b622533e..24b346e1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index cfe6646b..ebd21310 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL 
·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 13f624f6..824b9c2d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index fe222b75..4f178a22 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1bc1a5ad..5cc1e8eb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockSettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) if e1 != 0 { @@ -971,23 +981,6 @@ func Getpriority(which int, who int) (prio int, err error) { // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index d3e38f68..f485dbf4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 6c778c23..1893e2fe 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 37281cf5..16a4017d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9889f6a5..a5459e76 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f..d003c3d4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef7..0d45a941 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a..51e13eb0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee..d002d8ef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee..3f863d89 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12ac..61c72931 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c79..b5d17414 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index b102b95a..8daaf3fa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,31 +87,35 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - Subvol uint64 - _ [11]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + _ [1]uint32 + _ [9]uint64 } type Fsid struct { @@ -516,6 +520,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state 
uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -557,6 +584,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -1724,12 +1752,6 @@ const ( IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 IFLA_NETKIT_UNSPEC = 0x0 IFLA_NETKIT_PEER_INFO = 0x1 IFLA_NETKIT_PRIMARY = 0x2 @@ -1768,6 +1790,7 @@ const ( IFLA_VXLAN_DF = 0x1d IFLA_VXLAN_VNIFILTER = 0x1e IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_VXLAN_LABEL_POLICY = 0x20 IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1797,6 +1820,8 @@ const ( IFLA_GTP_ROLE = 0x4 IFLA_GTP_CREATE_SOCKETS = 0x5 IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_GTP_LOCAL = 0x7 + IFLA_GTP_LOCAL6 = 0x8 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1829,6 +1854,7 @@ const ( IFLA_BOND_AD_LACP_ACTIVE = 0x1d IFLA_BOND_MISSED_MAX = 0x1e IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_COUPLED_CONTROL = 0x20 IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1897,6 +1923,7 @@ const ( IFLA_HSR_SEQ_NR = 0x5 IFLA_HSR_VERSION = 0x6 IFLA_HSR_PROTOCOL = 0x7 + IFLA_HSR_INTERLINK = 0x8 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ -1949,6 +1976,15 @@ const ( IFLA_DSA_MASTER = 0x1 ) +const ( + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -2486,7 +2522,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 @@ -3766,7 +3802,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2b + ETHTOOL_MSG_USER_MAX = 0x2c ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3806,7 +3842,10 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_KERNEL_MAX = 0x2c + ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 + ETHTOOL_FLAG_OMIT_REPLY = 0x2 + ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3948,7 +3987,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -4079,6 +4118,106 @@ type EthtoolDrvinfo struct { Regdump_len uint32 } +type EthtoolTsInfo struct { + Cmd uint32 + So_timestamping uint32 + Phc_index int32 + Tx_types uint32 + Tx_reserved [3]uint32 + Rx_filters uint32 + Rx_reserved [3]uint32 +} + +type HwTstampConfig struct { + Flags int32 + Tx_type int32 + Rx_filter int32 +} + +const ( + HWTSTAMP_FILTER_NONE = 0x0 + HWTSTAMP_FILTER_ALL = 0x1 + HWTSTAMP_FILTER_SOME = 0x2 + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3 + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6 + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9 + 
HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc +) + +const ( + HWTSTAMP_TX_OFF = 0x0 + HWTSTAMP_TX_ON = 0x1 + HWTSTAMP_TX_ONESTEP_SYNC = 0x2 +) + +type ( + PtpClockCaps struct { + Max_adj int32 + N_alarm int32 + N_ext_ts int32 + N_per_out int32 + Pps int32 + N_pins int32 + Cross_timestamping int32 + Adjust_phase int32 + Max_phase_adj int32 + Rsv [11]int32 + } + PtpClockTime struct { + Sec int64 + Nsec uint32 + Reserved uint32 + } + PtpExttsEvent struct { + T PtpClockTime + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpExttsRequest struct { + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpPeroutRequest struct { + StartOrPhase PtpClockTime + Period PtpClockTime + Index uint32 + Flags uint32 + On PtpClockTime + } + PtpPinDesc struct { + Name [64]byte + Index uint32 + Func uint32 + Chan uint32 + Rsv [5]uint32 + } + PtpSysOffset struct { + Samples uint32 + Rsv [3]uint32 + Ts [51]PtpClockTime + } + PtpSysOffsetExtended struct { + Samples uint32 + Rsv [3]uint32 + Ts [25][3]PtpClockTime + } + PtpSysOffsetPrecise struct { + Device PtpClockTime + Realtime PtpClockTime + Monoraw PtpClockTime + Rsv [4]uint32 + } +) + +const ( + PTP_PF_NONE = 0x0 + PTP_PF_EXTTS = 0x1 + PTP_PF_PEROUT = 0x2 + PTP_PF_PHYSYNC = 0x3 +) + type ( HIDRawReportDescriptor struct { Size uint32 @@ -4606,7 +4745,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14a + NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5210,7 +5349,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x20 + NL80211_FREQUENCY_ATTR_MAX = 0x21 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc041..ad05b51a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 
0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af4..2e5d5a44 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -377,6 +377,12 @@ type Flock_t struct { Pid int32 } +type F_cnvrt struct { + Cvtcmd int32 + Pccsid int16 + Fccsid int16 +} + type Termios struct { Cflag uint32 Iflag uint32 diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 115341fb..4e613cf6 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 1fa34fd1..4510bfc3 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -313,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole @@ -721,20 +725,12 @@ func DurationSinceBoot() time.Duration { } func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e + type _FILE_END_OF_FILE_INFO struct { + EndOfFile int64 } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil + var info _FILE_END_OF_FILE_INFO + info.EndOfFile = length + return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) } func Gettimeofday(tv *Timeval) (err error) { @@ -890,6 +886,11 @@ const socket_error = uintptr(^uint32(0)) //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx +//sys 
GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange +//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. @@ -1681,13 +1682,16 @@ func (s NTStatus) Error() string { // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for // the more common *uint16 string type. func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) + s16, err := UTF16FromString(s) if err != nil { return nil, err } - RtlInitUnicodeString(&u, s16) - return &u, nil + n := uint16(len(s16) * 2) + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NULL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil } // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 4d0c1574..51311e20 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1060,6 +1060,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. 
http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 @@ -2031,6 +2032,50 @@ const ( IF_TYPE_IEEE1394 = 144 ) +// Enum NL_PREFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_prefix_origin +const ( + IpPrefixOriginOther = 0 + IpPrefixOriginManual = 1 + IpPrefixOriginWellKnown = 2 + IpPrefixOriginDhcp = 3 + IpPrefixOriginRouterAdvertisement = 4 + IpPrefixOriginUnchanged = 1 << 4 +) + +// Enum NL_SUFFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_suffix_origin +const ( + NlsoOther = 0 + NlsoManual = 1 + NlsoWellKnown = 2 + NlsoDhcp = 3 + NlsoLinkLayerAddress = 4 + NlsoRandom = 5 + IpSuffixOriginOther = 0 + IpSuffixOriginManual = 1 + IpSuffixOriginWellKnown = 2 + IpSuffixOriginDhcp = 3 + IpSuffixOriginLinkLayerAddress = 4 + IpSuffixOriginRandom = 5 + IpSuffixOriginUnchanged = 1 << 4 +) + +// Enum NL_DAD_STATE for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_dad_state +const ( + NldsInvalid = 0 + NldsTentative = 1 + NldsDuplicate = 2 + NldsDeprecated = 3 + NldsPreferred = 4 + IpDadStateInvalid = 0 + IpDadStateTentative = 1 + IpDadStateDuplicate = 2 + IpDadStateDeprecated = 3 + IpDadStatePreferred = 4 +) + type SocketAddress struct { Sockaddr *syscall.RawSockaddrAny SockaddrLength int32 @@ -2158,6 +2203,132 @@ const ( IfOperStatusLowerLayerDown = 7 ) +const ( + IF_MAX_PHYS_ADDRESS_LENGTH = 32 + IF_MAX_STRING_SIZE = 256 +) + +// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex. +const ( + MibIfEntryNormal = 0 + MibIfEntryNormalWithoutStatistics = 2 +) + +// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type. +const ( + MibParameterNotification = 0 + MibAddInstance = 1 + MibDeleteInstance = 2 + MibInitialNotification = 3 +) + +// MibIfRow2 stores information about a particular interface. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2. +type MibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid GUID + Alias [IF_MAX_STRING_SIZE + 1]uint16 + Description [IF_MAX_STRING_SIZE + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint8 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid GUID + ConnectionType uint32 + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. 
+type MibUnicastIpAddressRow struct { + Address RawSockaddrInet6 // SOCKADDR_INET union + InterfaceLuid uint64 + InterfaceIndex uint32 + PrefixOrigin uint32 + SuffixOrigin uint32 + ValidLifetime uint32 + PreferredLifetime uint32 + OnLinkPrefixLength uint8 + SkipAsSource uint8 + DadState uint32 + ScopeId uint32 + CreationTimeStamp Filetime +} + +const ScopeLevelCount = 16 + +// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface. +// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row. +type MibIpInterfaceRow struct { + Family uint16 + InterfaceLuid uint64 + InterfaceIndex uint32 + MaxReassemblySize uint32 + InterfaceIdentifier uint64 + MinRouterAdvertisementInterval uint32 + MaxRouterAdvertisementInterval uint32 + AdvertisingEnabled uint8 + ForwardingEnabled uint8 + WeakHostSend uint8 + WeakHostReceive uint8 + UseAutomaticMetric uint8 + UseNeighborUnreachabilityDetection uint8 + ManagedAddressConfigurationSupported uint8 + OtherStatefulConfigurationSupported uint8 + AdvertiseDefaultRoute uint8 + RouterDiscoveryBehavior uint32 + DadTransmits uint32 + BaseReachableTime uint32 + RetransmitTime uint32 + PathMtuDiscoveryTimeout uint32 + LinkLocalAddressBehavior uint32 + LinkLocalAddressTimeout uint32 + ZoneIndices [ScopeLevelCount]uint32 + SitePrefixLength uint32 + Metric uint32 + NlMtu uint32 + Connected uint8 + SupportsWakeUpPatterns uint8 + SupportsNeighborDiscovery uint8 + SupportsRouterDiscovery uint8 + ReachableTime uint32 + TransmitOffload uint32 + ReceiveOffload uint32 + DisableDefaultRoutes uint8 +} + // Console related constants used for the mode parameter to SetConsoleMode. See // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. 
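
The `MibIfRow2`, `MibUnicastIpAddressRow`, and `MibIpInterfaceRow` types above back the new `iphlpapi` wrappers declared earlier in this diff (`GetIfEntry2Ex`, `GetUnicastIpAddressEntry`, `NotifyIpInterfaceChange`, `CancelMibChangeNotify2`). As a minimal sketch of how downstream code could exercise the lookup path once this bump lands — the interface index used here is a hypothetical placeholder, not something taken from this patch:

```go
//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Query interface metadata without statistics, using the
	// MibIfEntryNormalWithoutStatistics level added in this bump.
	var row windows.MibIfRow2
	row.InterfaceIndex = 1 // hypothetical index; real code would enumerate adapters first
	if err := windows.GetIfEntry2Ex(windows.MibIfEntryNormalWithoutStatistics, &row); err != nil {
		fmt.Println("GetIfEntry2Ex:", err)
		return
	}
	// Alias is a NUL-terminated UTF-16 buffer; UTF16ToString trims it.
	fmt.Println("alias:", windows.UTF16ToString(row.Alias[:]))
}
```
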
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9bb979a3..6f525288 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -181,10 +181,15 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") + procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -247,7 +252,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -347,8 +354,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -1602,6 +1611,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si return } +func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1634,6 +1651,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), 
uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func AddDllDirectory(path *uint16) (cookie uintptr, err error) { r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) cookie = uintptr(r0) @@ -2162,6 +2219,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2170,6 +2236,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -3038,6 +3113,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3054,6 +3137,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { diff --git a/vendor/golang.org/x/term/README.md b/vendor/golang.org/x/term/README.md index d03d0aef..05ff623f 100644 --- a/vendor/golang.org/x/term/README.md +++ 
b/vendor/golang.org/x/term/README.md @@ -4,16 +4,13 @@ This repository provides Go terminal and console support packages. -## Download/Install - -The easiest way to install is to run `go get -u golang.org/x/term`. You can -also manually git clone the repository to `$GOPATH/src/golang.org/x/term`. - ## Report Issues / Send Patches This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/term. The main issue tracker for the term repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/term:" in the +https://go.dev/issues. Prefix your issue with "x/term:" in the subject line, so it is easy to find. diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go index 465f5606..df6bf948 100644 --- a/vendor/golang.org/x/term/term_windows.go +++ b/vendor/golang.org/x/term/term_windows.go @@ -26,6 +26,7 @@ func makeRaw(fd int) (*State, error) { return nil, err } raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err } diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f6c7f49..93a798ab 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index 1fc1de0b..958cf38d 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -73,6 +73,15 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // check, Preorder is almost twice as fast as Nodes. The two // features seem to contribute similar slowdowns (~1.4x each). + // This function is equivalent to the PreorderSeq call below, + // but to avoid the additional dynamic call (which adds 13-35% + // to the benchmarks), we expand it out. + // + // in.PreorderSeq(types...)(func(n ast.Node) bool { + // f(n) + // return true + // }) + mask := maskOf(types) for i := 0; i < len(in.events); { ev := in.events[i] @@ -171,7 +180,9 @@ func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, s // traverse builds the table of events representing a traversal. func traverse(files []*ast.File) []event { // Preallocate approximate number of events - // based on source file extent. + // based on source file extent of the declarations. + // (We use End-Pos not FileStart-FileEnd to neglect + // the effect of long doc comments.) // This makes traverse faster by 4x (!). 
var extent int for _, f := range files { diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go new file mode 100644 index 00000000..b7e95911 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package inspector + +import ( + "go/ast" + "iter" +) + +// PreorderSeq returns an iterator that visits all the +// nodes of the files supplied to New in depth-first order. +// It visits each node n before n's children. +// The complete traversal sequence is determined by ast.Inspect. +// +// The types argument, if non-empty, enables type-based +// filtering of events: only nodes whose type matches an +// element of the types slice are included in the sequence. +func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { + + // This implementation is identical to Preorder, + // except that it supports breaking out of the loop. + + return func(yield func(ast.Node) bool) { + mask := maskOf(types) + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} + +// All[N] returns an iterator over all the nodes of type N. +// N must be a pointer-to-struct type that implements ast.Node. +// +// Example: +// +// for call := range All[*ast.CallExpr](in) { ... } +func All[N interface { + *S + ast.Node +}, S any](in *Inspector) iter.Seq[N] { + + // To avoid additional dynamic call overheads, + // we duplicate rather than call the logic of PreorderSeq. + + mask := typeOf((N)(nil)) + return func(yield func(N) bool) { + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node.(N)) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c..024ffebd 100644 --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. 
var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index 029a6a12..08dad769 100644 --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -5,7 +5,7 @@ // Package editionssupport defines constants for editions that are supported. package editionssupport -import descriptorpb "google.golang.org/protobuf/types/descriptorpb" +import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index df53ff40..fa790e0f 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -258,6 +258,7 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -351,6 +352,7 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -425,6 +427,7 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { @@ -465,6 +468,7 @@ func (xd *Extension) IsPacked() bool { } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 8a57d60b..d2f54949 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case 
genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e56c91a8..67a51b32 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 11f5f356..fd4d0c83 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -68,7 +68,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd012..d9b9d916 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types. package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 9a652a2b..7f67cbb6 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -12,20 +12,25 @@ import ( const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. 
const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 ) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02f..bef5a25f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. const ( diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b8..9404270d 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 4bb0a7a2..0d5b546e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently. 
func (f *ExtensionField) Value() protoreflect.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e4..7c1f66c8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb7..78be9df3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577b..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} 
- -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go 
b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e2..077712c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index e06ece55..f72ddd88 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { return protoreflect.ValueOfString(v.Convert(stringType).String()) } func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface + // protoreflect.Value.String never panics, so we go through an interface // conversion here to check the type. s := v.Interface().(string) if c.goType.Kind() == reflect.Slice && s == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index febd2122..6254f5de 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,7 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 00000000..9f6c32a7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations, type assertions. +func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compares regular fields + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. 
+ for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps; + // however, some populated extension map values are equal to nil extension maps. + emx := mi.extensionMap(msx.pointer()) + emy := mi.extensionMap(msy.pointer()) + if emx != nil { + for k, x := range *emx { + xd := x.Type().TypeDescriptor() + xv := x.Value() + var y ExtensionField + ok := false + if emy != nil { + y, ok = (*emy)[k] + } + // We need to treat empty lists as equal to nil values + if emy == nil || !ok { + if xd.IsList() && xv.List().Len() == 0 { + continue + } + return false + } + + if !equalValue(xd, xv, y.Value()) { + return false + } + } + } + if emy != nil { + // emy may have extensions that emx does not have; we need to check them as well + for k, y := range *emy { + if emx != nil { + // emx has the field, so we already checked it + if _, ok := (*emx)[k]; ok { + continue + } + } + // Empty lists are equal to nil + if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 { + continue + } + + // Can't be equal if the extension is populated + return false + } + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool { + // slow path + if fd.Kind() != protoreflect.MessageKind { + return vx.Equal(vy) + } + + // fast path special cases + if fd.IsMap() { + if fd.MapValue().Kind() == protoreflect.MessageKind { + return equalMessageMap(vx.Map(), vy.Map()) + } + return vx.Equal(vy) + } + + if fd.IsList() { + return equalMessageList(vx.List(), vy.List()) + } + + return equalMessage(vx.Message(), vy.Message()) +} + +// Mostly copied from protoreflect.equalMap. +// This variant only works for messages as map types. +// All other map types should be handled via Value.Equal. +func equalMessageMap(mx, my protoreflect.Map) bool { + if mx.Len() != my.Len() { + return false + } + equal := true + mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + if !my.Has(k) { + equal = false + return false + } + vy := my.Get(k) + equal = equalMessage(vx.Message(), vy.Message()) + return equal + }) + return equal +} + +// Mostly copied from protoreflect.equalList. +// The only change is the usage of equalMessage instead of protoreflect.equalValue. +func equalMessageList(lx, ly protoreflect.List) bool { + if lx.Len() != ly.Len() { + return false + } + for i := 0; i < lx.Len(); i++ { + // We only operate on messages here since equalValue will not call us in any other case. + if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) { + return false + } + } + return true +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +// Copied from protoreflect.equalUnknown.
+func equalUnknown(x, y protoreflect.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee..b6849d66 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 019399d4..741b5ed2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -30,8 +30,8 @@ type MessageInfo struct { // Desc is the underlying message descriptor type and must be populated. Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 index da685e8a..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer any - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. 
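The new equal.go above is the internal half of a fast path that surfaces through proto.Equal (see the proto/equal.go hunk later in this diff): when both operands advertise a protoiface-level Equal method, comparison dispatches to impl.equal instead of the generic protoreflect traversal. As a minimal sketch of the caller-visible behavior, assuming ordinary generated messages (durationpb is used here only as a convenient well-known type; the result of proto.Equal is unchanged by this diff, only the path taken differs):

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	a := durationpb.New(90 * time.Second)
	b := durationpb.New(90 * time.Second)
	// Both operands are generated messages, so proto.Equal can take the
	// fast path; semantics match the reflection-based comparison.
	fmt.Println(proto.Equal(a, b)) // true
	b.Nanos++                      // make the messages differ
	fmt.Println(proto.Equal(a, b)) // false
}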
-func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. -var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. -type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. -func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. 
-// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() *map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. 
- in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 5f20ca5d..79e18666 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl import ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f333..00000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd0..832a7988 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2b..1ffddf68 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index dbbf1f68..fb8e15e8 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 34 - Patch = 2 + Minor = 35 + Patch = 1 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b0..c36d4a9c 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // The fast path is used only when both messages implement the Equal method. + pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index d248f292..78445d11 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type.
+// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { @@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. 
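To make the documented get/set pattern concrete, here is a small fragment-style sketch in the spirit of the examples above, using the documentation's hypothetical foopb package (E_MyExtension, MyMessage, and MyContainer are illustrative names, not a real API):

	m := &foopb.MyContainer{} // hypothetical extendable message
	proto.SetExtension(m, foopb.E_MyExtension, &foopb.MyMessage{})
	if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
		// mm is non-nil exactly when proto.HasExtension(m, foopb.E_MyExtension) is true.
		_ = mm
	}
	proto.ClearExtension(m, foopb.E_MyExtension)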
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 85617554..ebcb4a8a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -150,6 +150,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() + f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() } @@ -214,6 +215,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if xd.JsonName != nil { x.L2.StringName.InitJSON(xd.GetJsonName()) } + if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded { + x.L1.Kind = protoreflect.GroupKind + } } return xs, nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 804830ed..002e0047 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" - gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" + "google.golang.org/protobuf/types/gofeaturespb" ) var defaults = &descriptorpb.FeatureSetDefaults{} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6e..742cb518 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 75f83a2a..00000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. 
This is type safe in Go, but -// occupies more memory than necessary (72B). -type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface any // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v any) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() any { - return v.iface -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 7f3583ea..0015fcb3 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index f7d38699..479527b5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 44cf467d..24615656 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -39,6 +39,9 @@ type Methods = struct { // CheckInitialized returns an error if any required fields in the message are not set. CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) + + // Equal compares two messages and returns EqualOutput.Equal == true if they are equal. + Equal func(EqualInput) EqualOutput } // SupportFlags indicate support for optional features. @@ -166,3 +169,18 @@ type CheckInitializedInput = struct { type CheckInitializedOutput = struct { pragma.NoUnkeyedLiterals } + +// EqualInput is input to the Equal method. +type EqualInput = struct { + pragma.NoUnkeyedLiterals + + MessageA protoreflect.Message + MessageB protoreflect.Message +} + +// EqualOutput is output from the Equal method. 
+type EqualOutput = struct { + pragma.NoUnkeyedLiterals + + Equal bool +} diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 9403eb07..6dea75cd 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -1217,11 +1217,9 @@ type FileDescriptorSet struct { func (x *FileDescriptorSet) Reset() { *x = FileDescriptorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorSet) String() string { @@ -1232,7 +1230,7 @@ func (*FileDescriptorSet) ProtoMessage() {} func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1291,11 +1289,9 @@ type FileDescriptorProto struct { func (x *FileDescriptorProto) Reset() { *x = FileDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorProto) String() string { @@ -1306,7 +1302,7 @@ func (*FileDescriptorProto) ProtoMessage() {} func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1434,11 +1430,9 @@ type DescriptorProto struct { func (x *DescriptorProto) Reset() { *x = DescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto) String() string { @@ -1449,7 +1443,7 @@ func (*DescriptorProto) ProtoMessage() {} func (x *DescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1561,11 +1555,9 @@ const ( func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions) String() string { @@ -1576,7 +1568,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {} func (x *ExtensionRangeOptions) ProtoReflect() 
protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1680,11 +1672,9 @@ type FieldDescriptorProto struct { func (x *FieldDescriptorProto) Reset() { *x = FieldDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldDescriptorProto) String() string { @@ -1695,7 +1685,7 @@ func (*FieldDescriptorProto) ProtoMessage() {} func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1799,11 +1789,9 @@ type OneofDescriptorProto struct { func (x *OneofDescriptorProto) Reset() { *x = OneofDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofDescriptorProto) String() string { @@ -1814,7 +1802,7 @@ func (*OneofDescriptorProto) ProtoMessage() {} func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1863,11 +1851,9 @@ type EnumDescriptorProto struct { func (x *EnumDescriptorProto) Reset() { *x = EnumDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto) String() string { @@ -1878,7 +1864,7 @@ func (*EnumDescriptorProto) ProtoMessage() {} func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1941,11 +1927,9 @@ type EnumValueDescriptorProto struct { func (x *EnumValueDescriptorProto) Reset() { *x = EnumValueDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueDescriptorProto) String() string { @@ -1956,7 +1940,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {} func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2005,11 +1989,9 @@ type ServiceDescriptorProto struct { func (x *ServiceDescriptorProto) Reset() { *x = ServiceDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceDescriptorProto) String() string { @@ -2020,7 +2002,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {} func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2082,11 +2064,9 @@ const ( func (x *MethodDescriptorProto) Reset() { *x = MethodDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodDescriptorProto) String() string { @@ -2097,7 +2077,7 @@ func (*MethodDescriptorProto) ProtoMessage() {} func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2267,11 +2247,9 @@ const ( func (x *FileOptions) Reset() { *x = FileOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileOptions) String() string { @@ -2282,7 +2260,7 @@ func (*FileOptions) ProtoMessage() {} func (x *FileOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2534,11 +2512,9 @@ const ( func (x *MessageOptions) Reset() { *x = MessageOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageOptions) String() string { @@ -2549,7 +2525,7 @@ func (*MessageOptions) ProtoMessage() {} func (x *MessageOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2707,11 +2683,9 @@ const ( func (x *FieldOptions) Reset() { *x = 
FieldOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions) String() string { @@ -2722,7 +2696,7 @@ func (*FieldOptions) ProtoMessage() {} func (x *FieldOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2849,11 +2823,9 @@ type OneofOptions struct { func (x *OneofOptions) Reset() { *x = OneofOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofOptions) String() string { @@ -2864,7 +2836,7 @@ func (*OneofOptions) ProtoMessage() {} func (x *OneofOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2929,11 +2901,9 @@ const ( func (x *EnumOptions) Reset() { *x = EnumOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumOptions) String() string { @@ -2944,7 +2914,7 @@ func (*EnumOptions) ProtoMessage() {} func (x *EnumOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3026,11 +2996,9 @@ const ( func (x *EnumValueOptions) Reset() { *x = EnumValueOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueOptions) String() string { @@ -3041,7 +3009,7 @@ func (*EnumValueOptions) ProtoMessage() {} func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3115,11 +3083,9 @@ const ( func (x *ServiceOptions) Reset() { *x = ServiceOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *ServiceOptions) String() string { @@ -3130,7 +3096,7 @@ func (*ServiceOptions) ProtoMessage() {} func (x *ServiceOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3192,11 +3158,9 @@ const ( func (x *MethodOptions) Reset() { *x = MethodOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodOptions) String() string { @@ -3207,7 +3171,7 @@ func (*MethodOptions) ProtoMessage() {} func (x *MethodOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3274,11 +3238,9 @@ type UninterpretedOption struct { func (x *UninterpretedOption) Reset() { *x = UninterpretedOption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption) String() string { @@ -3289,7 +3251,7 @@ func (*UninterpretedOption) ProtoMessage() {} func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3375,11 +3337,9 @@ type FeatureSet struct { func (x *FeatureSet) Reset() { *x = FeatureSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSet) String() string { @@ -3390,7 +3350,7 @@ func (*FeatureSet) ProtoMessage() {} func (x *FeatureSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3467,11 +3427,9 @@ type FeatureSetDefaults struct { func (x *FeatureSetDefaults) Reset() { *x = FeatureSetDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults) String() string { @@ -3482,7 +3440,7 @@ func (*FeatureSetDefaults) ProtoMessage() {} func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3578,11 +3536,9 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo) String() string { @@ -3593,7 +3549,7 @@ func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3630,11 +3586,9 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo) String() string { @@ -3645,7 +3599,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3679,11 +3633,9 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ExtensionRange) String() string { @@ -3694,7 +3646,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3744,11 +3696,9 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ReservedRange) String() string { @@ -3759,7 +3709,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3813,11 +3763,9 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions_Declaration) String() string { @@ -3828,7 +3776,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3895,11 +3843,9 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto_EnumReservedRange) String() string { @@ -3910,7 +3856,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3950,11 +3896,9 @@ type FieldOptions_EditionDefault struct { func (x *FieldOptions_EditionDefault) Reset() { *x = FieldOptions_EditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_EditionDefault) String() string { @@ -3965,7 +3909,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {} func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4018,11 +3962,9 @@ type FieldOptions_FeatureSupport struct { func (x *FieldOptions_FeatureSupport) Reset() { *x = FieldOptions_FeatureSupport{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_FeatureSupport) 
String() string { @@ -4033,7 +3975,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {} func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4092,11 +4034,9 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption_NamePart) String() string { @@ -4107,7 +4047,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4154,11 +4094,9 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { @@ -4169,7 +4107,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4305,11 +4243,9 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo_Location) String() string { @@ -4320,7 +4256,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4392,11 +4328,9 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_google_protobuf_descriptor_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo_Annotation) String() string { @@ -4407,7 +4341,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5385,424 +5319,6 @@ func file_google_protobuf_descriptor_proto_init() { if File_google_protobuf_descriptor_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*FieldDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*OneofDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*ServiceDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*MethodDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*FileOptions); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*MessageOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*OneofOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*EnumOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ServiceOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*MethodOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ExtensionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions_Declaration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_EditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_FeatureSupport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption_NamePart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo_Location); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index a2ca940c..c7e860fc 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -29,11 +29,9 @@ type GoFeatures struct { func (x *GoFeatures) Reset() { *x = GoFeatures{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_go_features_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *GoFeatures) String() string { @@ -44,7 +42,7 @@ func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_go_features_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -145,20 +143,6 @@ func file_google_protobuf_go_features_proto_init() { if File_google_protobuf_go_features_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GoFeatures); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 7172b43d..87da199a 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,20 +459,6 @@ func file_google_protobuf_any_proto_init() { if File_google_protobuf_any_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 69f1bc33..50af8334 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -25,6 +25,8 @@ import ( "strconv" "strings" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + inf "gopkg.in/inf.v0" ) @@ -683,6 +685,12 @@ func (q Quantity) MarshalJSON() ([]byte, error) { return result, nil } +func (q Quantity) MarshalCBOR() ([]byte, error) { + // The call to String() should never return the string "" because the receiver's + // address will never be nil. + return cbor.Marshal(q.String()) +} + // ToUnstructured implements the value.UnstructuredConverter interface. 
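The MarshalCBOR hook added above encodes a Quantity as its canonical string, so the CBOR and JSON wire forms carry the same text. A minimal encode sketch, assuming only the vendored k8s.io/apimachinery/pkg/api/resource package (the byte breakdown in the comment is the expected CBOR text-string framing, not output quoted from the package's tests):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q := resource.MustParse("1500m")

	// Delegates to cbor.Marshal(q.String()), producing a CBOR text string.
	b, err := q.MarshalCBOR()
	if err != nil {
		panic(err)
	}

	// Expected: 65 31 35 30 30 6d — major type 3 (text), length 5, "1500m".
	fmt.Printf("% x\n", b)
}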
func (q Quantity) ToUnstructured() interface{} { return q.String() @@ -711,6 +719,27 @@ func (q *Quantity) UnmarshalJSON(value []byte) error { return nil } +func (q *Quantity) UnmarshalCBOR(value []byte) error { + var s *string + if err := cbor.Unmarshal(value, &s); err != nil { + return err + } + + if s == nil { + q.d.Dec = nil + q.i = int64Amount{} + return nil + } + + parsed, err := ParseQuantity(strings.TrimSpace(*s)) + if err != nil { + return err + } + + *q = parsed + return nil +} + // NewDecimalQuantity returns a new Quantity representing the given // value in the given format. func NewDecimalQuantity(b inf.Dec, format Format) *Quantity { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go index 15b45ffa..5005beb1 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -18,6 +18,7 @@ package v1 import ( "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" ) // IsControlledBy checks if the object has a controllerRef set to the given owner @@ -36,10 +37,14 @@ func GetControllerOf(controllee Object) *OwnerReference { return nil } cp := *ref + cp.Controller = ptr.To(*ref.Controller) + if ref.BlockOwnerDeletion != nil { + cp.BlockOwnerDeletion = ptr.To(*ref.BlockOwnerDeletion) + } return &cp } -// GetControllerOf returns a pointer to the controllerRef if controllee has a controller +// GetControllerOfNoCopy returns a pointer to the controllerRef if controllee has a controller func GetControllerOfNoCopy(controllee Object) *OwnerReference { refs := controllee.GetOwnerReferences() for i := range refs { @@ -52,14 +57,12 @@ func GetControllerOfNoCopy(controllee Object) *OwnerReference { // NewControllerRef creates an OwnerReference pointing to the given owner. 
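The UnmarshalCBOR counterpart above accepts either a text string or CBOR null; a null clears the value instead of erroring, mirroring the nil-pointer check on the decoded *string. A hedged round-trip sketch under the same assumption:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q := resource.MustParse("128Mi")

	b, err := q.MarshalCBOR()
	if err != nil {
		panic(err)
	}

	// Round-trips through ParseQuantity on the decode side.
	var out resource.Quantity
	if err := out.UnmarshalCBOR(b); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // 128Mi

	// CBOR null (0xf6) resets the amount; IsZero reports true afterwards.
	cleared := resource.MustParse("1")
	if err := cleared.UnmarshalCBOR([]byte{0xf6}); err != nil {
		panic(err)
	}
	fmt.Println(cleared.IsZero()) // true
}

Relatedly, GetControllerOf in the controller_ref.go hunk above now deep-copies the Controller and BlockOwnerDeletion pointers, so mutating the returned reference can no longer reach back into the controllee's owner list; NewControllerRef, whose rewrite follows, adopts the same k8s.io/utils/ptr.To style for its boolean fields.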
func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference { - blockOwnerDeletion := true - isController := true return &OwnerReference{ APIVersion: gvk.GroupVersion().String(), Kind: gvk.Kind, Name: owner.GetName(), UID: owner.GetUID(), - BlockOwnerDeletion: &blockOwnerDeletion, - Controller: &isController, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), } } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 75b88890..229ea2c2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -329,10 +329,38 @@ func (m *Duration) XXX_DiscardUnknown() { var xxx_messageInfo_Duration proto.InternalMessageInfo +func (m *FieldSelectorRequirement) Reset() { *m = FieldSelectorRequirement{} } +func (*FieldSelectorRequirement) ProtoMessage() {} +func (*FieldSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_a8431b6e0aeeb761, []int{10} +} +func (m *FieldSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldSelectorRequirement.Merge(m, src) +} +func (m *FieldSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *FieldSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_FieldSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldSelectorRequirement proto.InternalMessageInfo + func (m *FieldsV1) Reset() { *m = FieldsV1{} } func (*FieldsV1) ProtoMessage() {} func (*FieldsV1) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{10} + return fileDescriptor_a8431b6e0aeeb761, []int{11} } func (m *FieldsV1) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +388,7 @@ var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo func (m *GetOptions) Reset() { *m = GetOptions{} } func (*GetOptions) ProtoMessage() {} func (*GetOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{11} + return fileDescriptor_a8431b6e0aeeb761, []int{12} } func (m *GetOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +416,7 @@ var xxx_messageInfo_GetOptions proto.InternalMessageInfo func (m *GroupKind) Reset() { *m = GroupKind{} } func (*GroupKind) ProtoMessage() {} func (*GroupKind) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{12} + return fileDescriptor_a8431b6e0aeeb761, []int{13} } func (m *GroupKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +444,7 @@ var xxx_messageInfo_GroupKind proto.InternalMessageInfo func (m *GroupResource) Reset() { *m = GroupResource{} } func (*GroupResource) ProtoMessage() {} func (*GroupResource) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{13} + return fileDescriptor_a8431b6e0aeeb761, []int{14} } func (m *GroupResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +472,7 @@ var xxx_messageInfo_GroupResource proto.InternalMessageInfo func (m *GroupVersion) Reset() { *m = GroupVersion{} } func (*GroupVersion) ProtoMessage() {} func (*GroupVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, 
[]int{14} + return fileDescriptor_a8431b6e0aeeb761, []int{15} } func (m *GroupVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +500,7 @@ var xxx_messageInfo_GroupVersion proto.InternalMessageInfo func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func (*GroupVersionForDiscovery) ProtoMessage() {} func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{15} + return fileDescriptor_a8431b6e0aeeb761, []int{16} } func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +528,7 @@ var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } func (*GroupVersionKind) ProtoMessage() {} func (*GroupVersionKind) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{16} + return fileDescriptor_a8431b6e0aeeb761, []int{17} } func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +556,7 @@ var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } func (*GroupVersionResource) ProtoMessage() {} func (*GroupVersionResource) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{17} + return fileDescriptor_a8431b6e0aeeb761, []int{18} } func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +584,7 @@ var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo func (m *LabelSelector) Reset() { *m = LabelSelector{} } func (*LabelSelector) ProtoMessage() {} func (*LabelSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{18} + return fileDescriptor_a8431b6e0aeeb761, []int{19} } func (m *LabelSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +612,7 @@ var xxx_messageInfo_LabelSelector proto.InternalMessageInfo func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{19} + return fileDescriptor_a8431b6e0aeeb761, []int{20} } func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +640,7 @@ var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{20} + return fileDescriptor_a8431b6e0aeeb761, []int{21} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +668,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *ListMeta) Reset() { *m = ListMeta{} } func (*ListMeta) ProtoMessage() {} func (*ListMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{21} + return fileDescriptor_a8431b6e0aeeb761, []int{22} } func (m *ListMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +696,7 @@ var xxx_messageInfo_ListMeta proto.InternalMessageInfo func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} func (*ListOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{22} + return fileDescriptor_a8431b6e0aeeb761, []int{23} } func (m *ListOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-696,7 +724,7 @@ var xxx_messageInfo_ListOptions proto.InternalMessageInfo func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } func (*ManagedFieldsEntry) ProtoMessage() {} func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{23} + return fileDescriptor_a8431b6e0aeeb761, []int{24} } func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +752,7 @@ var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo func (m *MicroTime) Reset() { *m = MicroTime{} } func (*MicroTime) ProtoMessage() {} func (*MicroTime) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{24} + return fileDescriptor_a8431b6e0aeeb761, []int{25} } func (m *MicroTime) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MicroTime.Unmarshal(m, b) @@ -747,7 +775,7 @@ var xxx_messageInfo_MicroTime proto.InternalMessageInfo func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} func (*ObjectMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{25} + return fileDescriptor_a8431b6e0aeeb761, []int{26} } func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -775,7 +803,7 @@ var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (*OwnerReference) ProtoMessage() {} func (*OwnerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{26} + return fileDescriptor_a8431b6e0aeeb761, []int{27} } func (m *OwnerReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -803,7 +831,7 @@ var xxx_messageInfo_OwnerReference proto.InternalMessageInfo func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } func (*PartialObjectMetadata) ProtoMessage() {} func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{27} + return fileDescriptor_a8431b6e0aeeb761, []int{28} } func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -831,7 +859,7 @@ var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{28} + return fileDescriptor_a8431b6e0aeeb761, []int{29} } func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -859,7 +887,7 @@ var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo func (m *Patch) Reset() { *m = Patch{} } func (*Patch) ProtoMessage() {} func (*Patch) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{29} + return fileDescriptor_a8431b6e0aeeb761, []int{30} } func (m *Patch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -887,7 +915,7 @@ var xxx_messageInfo_Patch proto.InternalMessageInfo func (m *PatchOptions) Reset() { *m = PatchOptions{} } func (*PatchOptions) ProtoMessage() {} func (*PatchOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{30} + return fileDescriptor_a8431b6e0aeeb761, []int{31} } func (m *PatchOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -915,7 +943,7 @@ var xxx_messageInfo_PatchOptions proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) 
ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{31} + return fileDescriptor_a8431b6e0aeeb761, []int{32} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -943,7 +971,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *RootPaths) Reset() { *m = RootPaths{} } func (*RootPaths) ProtoMessage() {} func (*RootPaths) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{32} + return fileDescriptor_a8431b6e0aeeb761, []int{33} } func (m *RootPaths) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -971,7 +999,7 @@ var xxx_messageInfo_RootPaths proto.InternalMessageInfo func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{33} + return fileDescriptor_a8431b6e0aeeb761, []int{34} } func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -999,7 +1027,7 @@ var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{34} + return fileDescriptor_a8431b6e0aeeb761, []int{35} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1027,7 +1055,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo func (m *StatusCause) Reset() { *m = StatusCause{} } func (*StatusCause) ProtoMessage() {} func (*StatusCause) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{35} + return fileDescriptor_a8431b6e0aeeb761, []int{36} } func (m *StatusCause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1055,7 +1083,7 @@ var xxx_messageInfo_StatusCause proto.InternalMessageInfo func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (*StatusDetails) ProtoMessage() {} func (*StatusDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{36} + return fileDescriptor_a8431b6e0aeeb761, []int{37} } func (m *StatusDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1083,7 +1111,7 @@ var xxx_messageInfo_StatusDetails proto.InternalMessageInfo func (m *TableOptions) Reset() { *m = TableOptions{} } func (*TableOptions) ProtoMessage() {} func (*TableOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{37} + return fileDescriptor_a8431b6e0aeeb761, []int{38} } func (m *TableOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1111,7 +1139,7 @@ var xxx_messageInfo_TableOptions proto.InternalMessageInfo func (m *Time) Reset() { *m = Time{} } func (*Time) ProtoMessage() {} func (*Time) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{38} + return fileDescriptor_a8431b6e0aeeb761, []int{39} } func (m *Time) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Time.Unmarshal(m, b) @@ -1134,7 +1162,7 @@ var xxx_messageInfo_Time proto.InternalMessageInfo func (m *Timestamp) Reset() { *m = Timestamp{} } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{39} + return fileDescriptor_a8431b6e0aeeb761, []int{40} } func (m *Timestamp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1162,7 +1190,7 @@ var 
xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} func (*TypeMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{40} + return fileDescriptor_a8431b6e0aeeb761, []int{41} } func (m *TypeMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1190,7 +1218,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } func (*UpdateOptions) ProtoMessage() {} func (*UpdateOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{41} + return fileDescriptor_a8431b6e0aeeb761, []int{42} } func (m *UpdateOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1218,7 +1246,7 @@ var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo func (m *Verbs) Reset() { *m = Verbs{} } func (*Verbs) ProtoMessage() {} func (*Verbs) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{42} + return fileDescriptor_a8431b6e0aeeb761, []int{43} } func (m *Verbs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1246,7 +1274,7 @@ var xxx_messageInfo_Verbs proto.InternalMessageInfo func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (*WatchEvent) ProtoMessage() {} func (*WatchEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{43} + return fileDescriptor_a8431b6e0aeeb761, []int{44} } func (m *WatchEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1282,6 +1310,7 @@ func init() { proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions") proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") + proto.RegisterType((*FieldSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement") proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") @@ -1326,186 +1355,187 @@ func init() { } var fileDescriptor_a8431b6e0aeeb761 = []byte{ - // 2853 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4b, 0x6f, 0x24, 0x47, - 0xd9, 0x3d, 0x0f, 0x7b, 0xe6, 0x9b, 0x19, 0x3f, 0x6a, 0xbd, 0x30, 0x6b, 0x84, 0xc7, 0xe9, 0x44, - 0xd1, 0x06, 0x92, 0x71, 0x76, 0x09, 0xd1, 0x66, 0x43, 0x02, 0x1e, 0xcf, 0x7a, 0xe3, 0x64, 0x1d, - 0x5b, 0xe5, 0xdd, 0x05, 0x42, 0x84, 0xd2, 0x9e, 0x2e, 0x8f, 0x1b, 0xf7, 0x74, 0x4f, 0xaa, 0x7a, - 0xbc, 0x19, 0x38, 0x90, 0x03, 0x08, 0x90, 0x50, 0x14, 0x6e, 0x9c, 0x50, 0x22, 0xf8, 0x01, 0x88, - 0x13, 0x77, 0x90, 0xc8, 0x31, 0x88, 0x4b, 0x24, 0xd0, 0x28, 0x31, 0x07, 0x8e, 0x88, 0xab, 0x85, - 0x04, 0xaa, 0x47, 0x77, 0x57, 0xcf, 0x63, 0xdd, 0x93, 0x5d, 0x22, 0x6e, 0xd3, 0xdf, 0xbb, 0xaa, - 0xbe, 0xfa, 0xea, 0x7b, 0x0c, 0x3c, 0x73, 0x7c, 0x8d, 0xd5, 0x1d, 0x7f, 0xdd, 0xea, 0x3a, 0x1d, - 0xab, 0x75, 0xe4, 0x78, 0x84, 0xf6, 0xd7, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0xeb, 0x1d, 0x12, 0x58, - 0xeb, 0x27, 0x57, 0xd6, 0xdb, 0xc4, 0x23, 0xd4, 0x0a, 0x88, 0x5d, 0xef, 0x52, 0x3f, 0xf0, 0xd1, - 0x63, 0x92, 0xab, 0xae, 0x73, 0xd5, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0x75, 0xce, 0x55, 0x3f, 0xb9, - 0xb2, 0xf2, 0x54, 0xdb, 0x09, 0x8e, 0x7a, 0x07, 0xf5, 0x96, 0xdf, 0x59, 0x6f, 0xfb, 0x6d, 0x7f, - 
0x5d, 0x30, 0x1f, 0xf4, 0x0e, 0xc5, 0x97, 0xf8, 0x10, 0xbf, 0xa4, 0xd0, 0x95, 0xf5, 0x49, 0xa6, - 0xd0, 0x9e, 0x17, 0x38, 0x1d, 0x32, 0x6c, 0xc5, 0xca, 0xb3, 0xe7, 0x31, 0xb0, 0xd6, 0x11, 0xe9, - 0x58, 0xc3, 0x7c, 0xe6, 0x9f, 0xb2, 0x50, 0xd8, 0xd8, 0xdb, 0xbe, 0x49, 0xfd, 0x5e, 0x17, 0xad, - 0x41, 0xce, 0xb3, 0x3a, 0xa4, 0x6a, 0xac, 0x19, 0x97, 0x8b, 0x8d, 0xf2, 0x07, 0x83, 0xda, 0xcc, - 0xe9, 0xa0, 0x96, 0x7b, 0xd5, 0xea, 0x10, 0x2c, 0x30, 0xc8, 0x85, 0xc2, 0x09, 0xa1, 0xcc, 0xf1, - 0x3d, 0x56, 0xcd, 0xac, 0x65, 0x2f, 0x97, 0xae, 0xbe, 0x58, 0x4f, 0xb3, 0xfe, 0xba, 0x50, 0x70, - 0x57, 0xb2, 0x6e, 0xf9, 0xb4, 0xe9, 0xb0, 0x96, 0x7f, 0x42, 0x68, 0xbf, 0xb1, 0xa8, 0xb4, 0x14, - 0x14, 0x92, 0xe1, 0x48, 0x03, 0xfa, 0x91, 0x01, 0x8b, 0x5d, 0x4a, 0x0e, 0x09, 0xa5, 0xc4, 0x56, - 0xf8, 0x6a, 0x76, 0xcd, 0x78, 0x08, 0x6a, 0xab, 0x4a, 0xed, 0xe2, 0xde, 0x90, 0x7c, 0x3c, 0xa2, - 0x11, 0xfd, 0xda, 0x80, 0x15, 0x46, 0xe8, 0x09, 0xa1, 0x1b, 0xb6, 0x4d, 0x09, 0x63, 0x8d, 0xfe, - 0xa6, 0xeb, 0x10, 0x2f, 0xd8, 0xdc, 0x6e, 0x62, 0x56, 0xcd, 0x89, 0x7d, 0xf8, 0x7a, 0x3a, 0x83, - 0xf6, 0x27, 0xc9, 0x69, 0x98, 0xca, 0xa2, 0x95, 0x89, 0x24, 0x0c, 0xdf, 0xc7, 0x0c, 0xf3, 0x10, - 0xca, 0xe1, 0x41, 0xde, 0x72, 0x58, 0x80, 0xee, 0xc2, 0x6c, 0x9b, 0x7f, 0xb0, 0xaa, 0x21, 0x0c, - 0xac, 0xa7, 0x33, 0x30, 0x94, 0xd1, 0x98, 0x57, 0xf6, 0xcc, 0x8a, 0x4f, 0x86, 0x95, 0x34, 0xf3, - 0x67, 0x39, 0x28, 0x6d, 0xec, 0x6d, 0x63, 0xc2, 0xfc, 0x1e, 0x6d, 0x91, 0x14, 0x4e, 0x73, 0x0d, - 0xca, 0xcc, 0xf1, 0xda, 0x3d, 0xd7, 0xa2, 0x1c, 0x5a, 0x9d, 0x15, 0x94, 0xcb, 0x8a, 0xb2, 0xbc, - 0xaf, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xd7, 0x6a, 0x11, 0xbb, 0x9a, 0x59, - 0x33, 0x2e, 0x17, 0x1a, 0x48, 0xf1, 0xc1, 0xab, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x51, 0xc8, 0x0b, - 0x4b, 0xab, 0x05, 0xa1, 0xa6, 0xa2, 0xc8, 0xf3, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x13, 0x30, 0xa7, - 0xbc, 0xac, 0x5a, 0x14, 0x64, 0x0b, 0x8a, 0x6c, 0x2e, 0x74, 0x83, 0x10, 0xcf, 0xd7, 0x77, 0xec, - 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x38, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, 0xfc, 0x09, - 0xa1, 0x07, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0x8d, 0x22, 0x37, - 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x9f, 0x06, 0x62, 0x79, 0xd5, 0xfc, 0x5a, - 0xf6, 0x72, 0xb1, 0x31, 0xcf, 0xd7, 0xbb, 0x1f, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x5b, 0x56, 0x40, - 0xda, 0x3e, 0x75, 0x08, 0xab, 0xce, 0xc5, 0xf4, 0x9b, 0x11, 0x14, 0x6b, 0x14, 0xe8, 0x65, 0x40, - 0x2c, 0xf0, 0xa9, 0xd5, 0x26, 0x6a, 0xa9, 0x2f, 0x59, 0xec, 0xa8, 0x0a, 0x62, 0x75, 0x2b, 0x6a, - 0x75, 0x68, 0x7f, 0x84, 0x02, 0x8f, 0xe1, 0x32, 0x7f, 0x67, 0xc0, 0x82, 0xe6, 0x0b, 0xc2, 0xef, - 0xae, 0x41, 0xb9, 0xad, 0xdd, 0x3a, 0xe5, 0x17, 0xd1, 0x69, 0xeb, 0x37, 0x12, 0x27, 0x28, 0x11, - 0x81, 0x22, 0x55, 0x92, 0xc2, 0xe8, 0x72, 0x25, 0xb5, 0xd3, 0x86, 0x36, 0xc4, 0x9a, 0x34, 0x20, - 0xc3, 0xb1, 0x64, 0xf3, 0x1f, 0x86, 0x70, 0xe0, 0x30, 0xde, 0xa0, 0xcb, 0x5a, 0x4c, 0x33, 0xc4, - 0xf6, 0x95, 0x27, 0xc4, 0xa3, 0x73, 0x02, 0x41, 0xe6, 0xff, 0x22, 0x10, 0x5c, 0x2f, 0xfc, 0xf2, - 0xbd, 0xda, 0xcc, 0xdb, 0x7f, 0x5b, 0x9b, 0x31, 0x7f, 0x61, 0x40, 0x79, 0xa3, 0xdb, 0x75, 0xfb, - 0xbb, 0xdd, 0x40, 0x2c, 0xc0, 0x84, 0x59, 0x9b, 0xf6, 0x71, 0xcf, 0x53, 0x0b, 0x05, 0x7e, 0xbf, - 0x9b, 0x02, 0x82, 0x15, 0x86, 0xdf, 0x9f, 0x43, 0x9f, 0xb6, 0x88, 0xba, 0x6e, 0xd1, 0xfd, 0xd9, - 0xe2, 0x40, 0x2c, 0x71, 0xfc, 0x90, 0x0f, 0x1d, 0xe2, 0xda, 0x3b, 0x96, 0x67, 0xb5, 0x09, 0x55, - 0x97, 0x23, 0xda, 0xfa, 0x2d, 0x0d, 0x87, 0x13, 0x94, 0xe6, 0x7f, 0x32, 0x50, 0xdc, 0xf4, 0x3d, - 0xdb, 0x09, 0xd4, 0xe5, 
0x0a, 0xfa, 0xdd, 0x91, 0xe0, 0x71, 0xbb, 0xdf, 0x25, 0x58, 0x60, 0xd0, - 0x73, 0x30, 0xcb, 0x02, 0x2b, 0xe8, 0x31, 0x61, 0x4f, 0xb1, 0xf1, 0x48, 0x18, 0x96, 0xf6, 0x05, - 0xf4, 0x6c, 0x50, 0x5b, 0x88, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x03, 0xb1, 0x51, - 0xf6, 0x4d, 0xf9, 0xec, 0x85, 0xef, 0x47, 0x36, 0xf6, 0xf4, 0xdd, 0x11, 0x0a, 0x3c, 0x86, 0x0b, - 0x9d, 0x00, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x87, 0xa8, 0x0b, - 0xff, 0xa5, 0x74, 0x27, 0xce, 0x39, 0x62, 0xbd, 0xb7, 0x46, 0xa4, 0xe1, 0x31, 0x1a, 0xd0, 0xe3, - 0x30, 0x4b, 0x89, 0xc5, 0x7c, 0xaf, 0x9a, 0x17, 0xcb, 0x8f, 0xa2, 0x32, 0x16, 0x50, 0xac, 0xb0, - 0x3c, 0xa0, 0x75, 0x08, 0x63, 0x56, 0x3b, 0x0c, 0xaf, 0x51, 0x40, 0xdb, 0x91, 0x60, 0x1c, 0xe2, - 0xcd, 0xdf, 0x1a, 0x50, 0xd9, 0xa4, 0xc4, 0x0a, 0xc8, 0x34, 0x6e, 0xf1, 0xa9, 0x4f, 0x1c, 0x6d, - 0xc0, 0x82, 0xf8, 0xbe, 0x6b, 0xb9, 0x8e, 0x2d, 0xcf, 0x20, 0x27, 0x98, 0x3f, 0xaf, 0x98, 0x17, - 0xb6, 0x92, 0x68, 0x3c, 0x4c, 0x6f, 0xfe, 0x24, 0x0b, 0x95, 0x26, 0x71, 0x49, 0x6c, 0xf2, 0x16, - 0xa0, 0x36, 0xb5, 0x5a, 0x64, 0x8f, 0x50, 0xc7, 0xb7, 0xf7, 0x49, 0xcb, 0xf7, 0x6c, 0x26, 0xdc, - 0x28, 0xdb, 0xf8, 0x1c, 0xdf, 0xdf, 0x9b, 0x23, 0x58, 0x3c, 0x86, 0x03, 0xb9, 0x50, 0xe9, 0x52, - 0xf1, 0x5b, 0xec, 0xb9, 0xf4, 0xb2, 0xd2, 0xd5, 0xaf, 0xa4, 0x3b, 0xd2, 0x3d, 0x9d, 0xb5, 0xb1, - 0x74, 0x3a, 0xa8, 0x55, 0x12, 0x20, 0x9c, 0x14, 0x8e, 0xbe, 0x01, 0x8b, 0x3e, 0xed, 0x1e, 0x59, - 0x5e, 0x93, 0x74, 0x89, 0x67, 0x13, 0x2f, 0x60, 0x62, 0x23, 0x0b, 0x8d, 0x65, 0x9e, 0x8b, 0xec, - 0x0e, 0xe1, 0xf0, 0x08, 0x35, 0x7a, 0x0d, 0x96, 0xba, 0xd4, 0xef, 0x5a, 0x6d, 0xb1, 0x31, 0x7b, - 0xbe, 0xeb, 0xb4, 0xfa, 0x6a, 0x3b, 0x9f, 0x3c, 0x1d, 0xd4, 0x96, 0xf6, 0x86, 0x91, 0x67, 0x83, - 0xda, 0x05, 0xb1, 0x75, 0x1c, 0x12, 0x23, 0xf1, 0xa8, 0x18, 0xcd, 0x0d, 0xf2, 0x93, 0xdc, 0xc0, - 0xdc, 0x86, 0x42, 0xb3, 0xa7, 0xee, 0xc4, 0x0b, 0x50, 0xb0, 0xd5, 0x6f, 0xb5, 0xf3, 0xe1, 0xe5, - 0x8c, 0x68, 0xce, 0x06, 0xb5, 0x0a, 0x4f, 0x3f, 0xeb, 0x21, 0x00, 0x47, 0x2c, 0xe6, 0xe3, 0x50, - 0x10, 0x07, 0xcf, 0xee, 0x5e, 0x41, 0x8b, 0x90, 0xc5, 0xd6, 0x3d, 0x21, 0xa5, 0x8c, 0xf9, 0x4f, - 0x2d, 0x8a, 0xed, 0x02, 0xdc, 0x24, 0x41, 0x78, 0xf0, 0x1b, 0xb0, 0x10, 0x86, 0xf2, 0xe4, 0x0b, - 0x13, 0x79, 0x13, 0x4e, 0xa2, 0xf1, 0x30, 0xbd, 0xf9, 0x3a, 0x14, 0xc5, 0x2b, 0xc4, 0x9f, 0xf0, - 0x38, 0x5d, 0x30, 0xee, 0x93, 0x2e, 0x84, 0x39, 0x40, 0x66, 0x52, 0x0e, 0xa0, 0x99, 0xeb, 0x42, - 0x45, 0xf2, 0x86, 0x09, 0x52, 0x2a, 0x0d, 0x4f, 0x42, 0x21, 0x34, 0x53, 0x69, 0x89, 0x12, 0xe3, - 0x50, 0x10, 0x8e, 0x28, 0x34, 0x6d, 0x47, 0x90, 0x78, 0x51, 0xd3, 0x29, 0xd3, 0xb2, 0x9f, 0xcc, - 0xfd, 0xb3, 0x1f, 0x4d, 0xd3, 0x0f, 0xa1, 0x3a, 0x29, 0x9b, 0x7e, 0x80, 0x37, 0x3f, 0xbd, 0x29, - 0xe6, 0x3b, 0x06, 0x2c, 0xea, 0x92, 0xd2, 0x1f, 0x5f, 0x7a, 0x25, 0xe7, 0x67, 0x7b, 0xda, 0x8e, - 0xfc, 0xca, 0x80, 0xe5, 0xc4, 0xd2, 0xa6, 0x3a, 0xf1, 0x29, 0x8c, 0xd2, 0x9d, 0x23, 0x3b, 0x85, - 0x73, 0xfc, 0x25, 0x03, 0x95, 0x5b, 0xd6, 0x01, 0x71, 0xf7, 0x89, 0x4b, 0x5a, 0x81, 0x4f, 0xd1, - 0x0f, 0xa0, 0xd4, 0xb1, 0x82, 0xd6, 0x91, 0x80, 0x86, 0x95, 0x41, 0x33, 0x5d, 0xb0, 0x4b, 0x48, - 0xaa, 0xef, 0xc4, 0x62, 0x6e, 0x78, 0x01, 0xed, 0x37, 0x2e, 0x28, 0x93, 0x4a, 0x1a, 0x06, 0xeb, - 0xda, 0x44, 0x39, 0x27, 0xbe, 0x6f, 0xbc, 0xd5, 0xe5, 0x69, 0xcb, 0xf4, 0x55, 0x64, 0xc2, 0x04, - 0x4c, 0xde, 0xec, 0x39, 0x94, 0x74, 0x88, 0x17, 0xc4, 0xe5, 0xdc, 0xce, 0x90, 0x7c, 0x3c, 0xa2, - 0x71, 0xe5, 0x45, 0x58, 0x1c, 0x36, 0x9e, 0xc7, 0x9f, 0x63, 0xd2, 0x97, 0xe7, 0x85, 0xf9, 0x4f, - 0xb4, 0x0c, 0xf9, 0x13, 0xcb, 0xed, 0xa9, 0xdb, 
0x88, 0xe5, 0xc7, 0xf5, 0xcc, 0x35, 0xc3, 0xfc, - 0x8d, 0x01, 0xd5, 0x49, 0x86, 0xa0, 0x2f, 0x6a, 0x82, 0x1a, 0x25, 0x65, 0x55, 0xf6, 0x15, 0xd2, - 0x97, 0x52, 0x6f, 0x40, 0xc1, 0xef, 0xf2, 0x9c, 0xc2, 0xa7, 0xea, 0xd4, 0x9f, 0x08, 0x4f, 0x72, - 0x57, 0xc1, 0xcf, 0x06, 0xb5, 0x8b, 0x09, 0xf1, 0x21, 0x02, 0x47, 0xac, 0x3c, 0x52, 0x0b, 0x7b, - 0xf8, 0xeb, 0x11, 0x45, 0xea, 0xbb, 0x02, 0x82, 0x15, 0xc6, 0xfc, 0xbd, 0x01, 0x39, 0x91, 0x90, - 0xbf, 0x0e, 0x05, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0xa5, 0x20, 0xe7, 0xde, 0x21, - 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xef, 0x04, 0xa4, 0x13, 0x1e, 0xe4, - 0x53, 0x13, 0x45, 0xab, 0x46, 0x44, 0x1d, 0x5b, 0xf7, 0x6e, 0xbc, 0x15, 0x10, 0x8f, 0x1f, 0x46, - 0x7c, 0x35, 0xb6, 0xb9, 0x0c, 0x2c, 0x45, 0x99, 0xff, 0x32, 0x20, 0x52, 0xc5, 0x9d, 0x9f, 0x11, - 0xf7, 0xf0, 0x96, 0xe3, 0x1d, 0xab, 0x6d, 0x8d, 0xcc, 0xd9, 0x57, 0x70, 0x1c, 0x51, 0x8c, 0x7b, - 0x1e, 0x32, 0xd3, 0x3d, 0x0f, 0x5c, 0x61, 0xcb, 0xf7, 0x02, 0xc7, 0xeb, 0x8d, 0xdc, 0xb6, 0x4d, - 0x05, 0xc7, 0x11, 0x05, 0x4f, 0x44, 0x28, 0xe9, 0x58, 0x8e, 0xe7, 0x78, 0x6d, 0xbe, 0x88, 0x4d, - 0xbf, 0xe7, 0x05, 0xe2, 0x45, 0x56, 0x89, 0x08, 0x1e, 0xc1, 0xe2, 0x31, 0x1c, 0xe6, 0xbf, 0x73, - 0x50, 0xe2, 0x6b, 0x0e, 0xdf, 0xb9, 0xe7, 0xa1, 0xe2, 0xea, 0x5e, 0xa0, 0xd6, 0x7e, 0x51, 0x99, - 0x92, 0xbc, 0xd7, 0x38, 0x49, 0xcb, 0x99, 0x45, 0x0a, 0x15, 0x31, 0x67, 0x92, 0xcc, 0x5b, 0x3a, - 0x12, 0x27, 0x69, 0x79, 0xf4, 0xba, 0xc7, 0xef, 0x87, 0xca, 0x4c, 0xa2, 0x23, 0xfa, 0x26, 0x07, - 0x62, 0x89, 0x43, 0x3b, 0x70, 0xc1, 0x72, 0x5d, 0xff, 0x9e, 0x00, 0x36, 0x7c, 0xff, 0xb8, 0x63, - 0xd1, 0x63, 0x26, 0x8a, 0xe9, 0x42, 0xe3, 0x0b, 0x8a, 0xe5, 0xc2, 0xc6, 0x28, 0x09, 0x1e, 0xc7, - 0x37, 0xee, 0xd8, 0x72, 0x53, 0x1e, 0xdb, 0x11, 0x2c, 0x0f, 0x81, 0xc4, 0x2d, 0x57, 0x95, 0xed, - 0x33, 0x4a, 0xce, 0x32, 0x1e, 0x43, 0x73, 0x36, 0x01, 0x8e, 0xc7, 0x4a, 0x44, 0xd7, 0x61, 0x9e, - 0x7b, 0xb2, 0xdf, 0x0b, 0xc2, 0xbc, 0x33, 0x2f, 0x8e, 0x1b, 0x9d, 0x0e, 0x6a, 0xf3, 0xb7, 0x13, - 0x18, 0x3c, 0x44, 0xc9, 0x37, 0xd7, 0x75, 0x3a, 0x4e, 0x50, 0x9d, 0x13, 0x2c, 0xd1, 0xe6, 0xde, - 0xe2, 0x40, 0x2c, 0x71, 0x09, 0x0f, 0x2c, 0x9c, 0xeb, 0x81, 0x9b, 0xb0, 0xc4, 0x88, 0x67, 0x6f, - 0x7b, 0x4e, 0xe0, 0x58, 0xee, 0x8d, 0x13, 0x91, 0x55, 0x96, 0xc4, 0x41, 0x5c, 0xe4, 0x29, 0xe1, - 0xfe, 0x30, 0x12, 0x8f, 0xd2, 0x9b, 0x7f, 0xce, 0x02, 0x92, 0x09, 0xbb, 0x2d, 0x93, 0x32, 0x19, - 0x17, 0x79, 0x59, 0xa1, 0x12, 0x7e, 0x63, 0xa8, 0xac, 0x50, 0xb9, 0x7e, 0x88, 0x47, 0x3b, 0x50, - 0x94, 0xf1, 0x29, 0xbe, 0x73, 0xeb, 0x8a, 0xb8, 0xb8, 0x1b, 0x22, 0xce, 0x06, 0xb5, 0x95, 0x84, - 0x9a, 0x08, 0x23, 0x4a, 0xbe, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1d, 0xbd, 0xe9, 0x57, 0x8c, - 0x5b, 0x3f, 0x71, 0xf9, 0x8e, 0x35, 0x2a, 0xf4, 0x12, 0xe4, 0x82, 0x4f, 0x57, 0x96, 0x15, 0x44, - 0xd5, 0xc9, 0x8b, 0x30, 0x21, 0x81, 0x6b, 0x17, 0x97, 0x82, 0x71, 0xb3, 0x54, 0x45, 0x15, 0x69, - 0xdf, 0x8a, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xc2, 0xa1, 0xca, 0x67, 0xc5, 0xe9, 0xa6, 0x8e, - 0xb3, 0x61, 0x16, 0x2c, 0xfb, 0x0e, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x89, 0xf5, 0x0e, - 0xa2, 0x14, 0x40, 0xba, 0x44, 0xf4, 0xde, 0xee, 0xc7, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x42, 0x71, - 0xc7, 0x69, 0x51, 0x5f, 0x14, 0x92, 0x4f, 0xc0, 0x1c, 0x4b, 0x54, 0x49, 0xd1, 0x49, 0x86, 0xae, - 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x5a, 0x28, 0x1f, 0xfb, 0xe8, 0xab, 0x1c, 0x88, - 0x25, 0xee, 0xfa, 0x32, 0xcf, 0x32, 0x7e, 0xfa, 0x7e, 0x6d, 0xe6, 0xdd, 0xf7, 0x6b, 0x33, 0xef, - 0xbd, 0xaf, 0x32, 0x8e, 0x3f, 0x00, 0xc0, 0xee, 0xc1, 0xf7, 0x48, 0x4b, 
0xc6, 0xee, 0x54, 0xbd, - 0xc1, 0xb0, 0x25, 0x2d, 0x7a, 0x83, 0x99, 0xa1, 0xcc, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a, 0x87, - 0x62, 0xd4, 0xf5, 0x53, 0xfe, 0xb1, 0x14, 0xfa, 0x5b, 0xd4, 0x1a, 0xc4, 0x31, 0x4d, 0xe2, 0x21, - 0xc9, 0x9d, 0xfb, 0x90, 0x34, 0x20, 0xdb, 0x73, 0x6c, 0x55, 0x75, 0x3f, 0x1d, 0x3e, 0xe4, 0x77, - 0xb6, 0x9b, 0x67, 0x83, 0xda, 0x23, 0x93, 0x9a, 0xed, 0x41, 0xbf, 0x4b, 0x58, 0xfd, 0xce, 0x76, - 0x13, 0x73, 0xe6, 0x71, 0x51, 0x6d, 0x76, 0xca, 0xa8, 0x76, 0x15, 0xa0, 0x1d, 0xf7, 0x2e, 0x64, - 0xd0, 0x88, 0x1c, 0x51, 0xeb, 0x59, 0x68, 0x54, 0x88, 0xc1, 0x52, 0x8b, 0xd7, 0xf7, 0xaa, 0x87, - 0xc0, 0x02, 0xab, 0x23, 0xbb, 0xa1, 0xd3, 0xdd, 0x89, 0x4b, 0x4a, 0xcd, 0xd2, 0xe6, 0xb0, 0x30, - 0x3c, 0x2a, 0x1f, 0xf9, 0xb0, 0x64, 0xab, 0x32, 0x33, 0x56, 0x5a, 0x9c, 0x5a, 0xa9, 0x88, 0x58, - 0xcd, 0x61, 0x41, 0x78, 0x54, 0x36, 0xfa, 0x2e, 0xac, 0x84, 0xc0, 0xd1, 0x5a, 0x5f, 0x44, 0xfd, - 0x6c, 0x63, 0xf5, 0x74, 0x50, 0x5b, 0x69, 0x4e, 0xa4, 0xc2, 0xf7, 0x91, 0x80, 0x6c, 0x98, 0x75, - 0x65, 0x96, 0x5c, 0x12, 0x99, 0xcd, 0xd7, 0xd2, 0xad, 0x22, 0xf6, 0xfe, 0xba, 0x9e, 0x1d, 0x47, - 0x7d, 0x1b, 0x95, 0x18, 0x2b, 0xd9, 0xe8, 0x2d, 0x28, 0x59, 0x9e, 0xe7, 0x07, 0x96, 0xec, 0x3e, - 0x94, 0x85, 0xaa, 0x8d, 0xa9, 0x55, 0x6d, 0xc4, 0x32, 0x86, 0xb2, 0x71, 0x0d, 0x83, 0x75, 0x55, - 0xe8, 0x1e, 0x2c, 0xf8, 0xf7, 0x3c, 0x42, 0x31, 0x39, 0x24, 0x94, 0x78, 0x2d, 0xc2, 0xaa, 0x15, - 0xa1, 0xfd, 0x99, 0x94, 0xda, 0x13, 0xcc, 0xb1, 0x4b, 0x27, 0xe1, 0x0c, 0x0f, 0x6b, 0x41, 0x75, - 0x1e, 0x5b, 0x3d, 0xcb, 0x75, 0xbe, 0x4f, 0x28, 0xab, 0xce, 0xc7, 0x0d, 0xeb, 0xad, 0x08, 0x8a, - 0x35, 0x0a, 0xd4, 0x83, 0x4a, 0x47, 0x7f, 0x32, 0xaa, 0x4b, 0xc2, 0xcc, 0x6b, 0xe9, 0xcc, 0x1c, - 0x7d, 0xd4, 0xe2, 0x34, 0x28, 0x81, 0xc3, 0x49, 0x2d, 0x2b, 0xcf, 0x41, 0xe9, 0x53, 0x56, 0x08, - 0xbc, 0xc2, 0x18, 0x3e, 0x90, 0xa9, 0x2a, 0x8c, 0x3f, 0x66, 0x60, 0x3e, 0xb9, 0x8d, 0x43, 0xcf, - 0x61, 0x3e, 0xd5, 0x73, 0x18, 0xd6, 0xb2, 0xc6, 0xc4, 0xc9, 0x45, 0x18, 0x9f, 0xb3, 0x13, 0xe3, - 0xb3, 0x0a, 0x83, 0xb9, 0x07, 0x09, 0x83, 0x75, 0x00, 0x9e, 0xac, 0x50, 0xdf, 0x75, 0x09, 0x15, - 0x11, 0xb0, 0xa0, 0x26, 0x14, 0x11, 0x14, 0x6b, 0x14, 0x3c, 0xa5, 0x3e, 0x70, 0xfd, 0xd6, 0xb1, - 0xd8, 0x82, 0xf0, 0xf6, 0x8a, 0xd8, 0x57, 0x90, 0x29, 0x75, 0x63, 0x04, 0x8b, 0xc7, 0x70, 0x98, - 0x7d, 0xb8, 0xb8, 0x67, 0x51, 0x9e, 0xe4, 0xc4, 0x37, 0x45, 0xd4, 0x2c, 0x6f, 0x8c, 0x54, 0x44, - 0x4f, 0x4f, 0x7b, 0xe3, 0xe2, 0xcd, 0x8f, 0x61, 0x71, 0x55, 0x64, 0xfe, 0xd5, 0x80, 0x4b, 0x63, - 0x75, 0x7f, 0x06, 0x15, 0xd9, 0x1b, 0xc9, 0x8a, 0xec, 0xf9, 0x94, 0xad, 0xcc, 0x71, 0xd6, 0x4e, - 0xa8, 0xcf, 0xe6, 0x20, 0xbf, 0xc7, 0x33, 0x61, 0xf3, 0x43, 0x03, 0xca, 0xe2, 0xd7, 0x34, 0x9d, - 0xe4, 0x5a, 0x72, 0xc0, 0x50, 0x7c, 0x78, 0xc3, 0x85, 0x87, 0xd1, 0x6a, 0x7e, 0xc7, 0x80, 0x64, - 0x0f, 0x17, 0xbd, 0x28, 0xaf, 0x80, 0x11, 0x35, 0x59, 0xa7, 0x74, 0xff, 0x17, 0x26, 0x95, 0xa4, - 0x17, 0x52, 0x75, 0x2b, 0x9f, 0x84, 0x22, 0xf6, 0xfd, 0x60, 0xcf, 0x0a, 0x8e, 0x18, 0xdf, 0xbb, - 0x2e, 0xff, 0xa1, 0xb6, 0x57, 0xec, 0x9d, 0xc0, 0x60, 0x09, 0x37, 0x7f, 0x6e, 0xc0, 0xa5, 0x89, - 0x73, 0x23, 0x1e, 0x45, 0x5a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8, 0x78, - 0x2d, 0x99, 0x18, 0x36, 0x0d, 0xd7, 0x92, 0x09, 0x6d, 0x38, 0x49, 0x6b, 0xfe, 0x33, 0x03, 0x6a, - 0x50, 0xf3, 0x3f, 0x76, 0xfa, 0xc7, 0x87, 0xc6, 0x44, 0xf3, 0xc9, 0x31, 0x51, 0x34, 0x13, 0xd2, - 0xe6, 0x24, 0xd9, 0xfb, 0xcf, 0x49, 0xd0, 0xb3, 0xd1, 0xe8, 0x45, 0xfa, 0xd0, 0x6a, 0x72, 0xf4, - 0x72, 0x36, 0xa8, 0x95, 0x95, 0xf0, 0xe4, 0x28, 0xe6, 0x35, 0x98, 0xb3, 0x49, 0x60, 0x39, 0xae, - 
0xac, 0x0b, 0x53, 0x0f, 0x13, 0xa4, 0xb0, 0xa6, 0x64, 0x6d, 0x94, 0xb8, 0x4d, 0xea, 0x03, 0x87, - 0x02, 0x79, 0xc0, 0x6e, 0xf9, 0xb6, 0xac, 0x48, 0xf2, 0x71, 0xc0, 0xde, 0xf4, 0x6d, 0x82, 0x05, - 0xc6, 0x7c, 0xd7, 0x80, 0x92, 0x94, 0xb4, 0x69, 0xf5, 0x18, 0x41, 0x57, 0xa2, 0x55, 0xc8, 0xe3, - 0xbe, 0xa4, 0xcf, 0xd8, 0xce, 0x06, 0xb5, 0xa2, 0x20, 0x13, 0xc5, 0xcc, 0x98, 0x59, 0x52, 0xe6, - 0x9c, 0x3d, 0x7a, 0x14, 0xf2, 0xe2, 0x02, 0xa9, 0xcd, 0x8c, 0x87, 0x85, 0x1c, 0x88, 0x25, 0xce, - 0xfc, 0x38, 0x03, 0x95, 0xc4, 0xe2, 0x52, 0xd4, 0x05, 0x51, 0x0b, 0x35, 0x93, 0xa2, 0x2d, 0x3f, - 0x79, 0x34, 0xaf, 0x9e, 0xaf, 0xd9, 0x07, 0x79, 0xbe, 0xbe, 0x0d, 0xb3, 0x2d, 0xbe, 0x47, 0xe1, - 0x3f, 0x3d, 0xae, 0x4c, 0x73, 0x9c, 0x62, 0x77, 0x63, 0x6f, 0x14, 0x9f, 0x0c, 0x2b, 0x81, 0xe8, - 0x26, 0x2c, 0x51, 0x12, 0xd0, 0xfe, 0xc6, 0x61, 0x40, 0xa8, 0xde, 0x4c, 0xc8, 0xc7, 0xd9, 0x37, - 0x1e, 0x26, 0xc0, 0xa3, 0x3c, 0xe6, 0x01, 0x94, 0x6f, 0x5b, 0x07, 0x6e, 0x34, 0x1e, 0xc3, 0x50, - 0x71, 0xbc, 0x96, 0xdb, 0xb3, 0x89, 0x0c, 0xe8, 0x61, 0xf4, 0x0a, 0x2f, 0xed, 0xb6, 0x8e, 0x3c, - 0x1b, 0xd4, 0x2e, 0x24, 0x00, 0x72, 0x1e, 0x84, 0x93, 0x22, 0x4c, 0x17, 0x72, 0x9f, 0x61, 0x25, - 0xf9, 0x1d, 0x28, 0xc6, 0xb9, 0xfe, 0x43, 0x56, 0x69, 0xbe, 0x01, 0x05, 0xee, 0xf1, 0x61, 0x8d, - 0x7a, 0x4e, 0x96, 0x94, 0xcc, 0xbd, 0x32, 0x69, 0x72, 0x2f, 0x31, 0x64, 0xbd, 0xd3, 0xb5, 0x1f, - 0x70, 0xc8, 0x9a, 0x79, 0x90, 0x97, 0x2f, 0x3b, 0xe5, 0xcb, 0x77, 0x15, 0xe4, 0x1f, 0x51, 0xf8, - 0x23, 0x23, 0x13, 0x08, 0xed, 0x91, 0xd1, 0xdf, 0x7f, 0x6d, 0xc2, 0xf0, 0x63, 0x03, 0x40, 0xb4, - 0xf2, 0x44, 0x1b, 0x29, 0xc5, 0x38, 0xff, 0x0e, 0xcc, 0xfa, 0xd2, 0x23, 0xe5, 0xa0, 0x75, 0xca, - 0x7e, 0x71, 0x74, 0x91, 0xa4, 0x4f, 0x62, 0x25, 0xac, 0xf1, 0xf2, 0x07, 0x9f, 0xac, 0xce, 0x7c, - 0xf8, 0xc9, 0xea, 0xcc, 0x47, 0x9f, 0xac, 0xce, 0xbc, 0x7d, 0xba, 0x6a, 0x7c, 0x70, 0xba, 0x6a, - 0x7c, 0x78, 0xba, 0x6a, 0x7c, 0x74, 0xba, 0x6a, 0x7c, 0x7c, 0xba, 0x6a, 0xbc, 0xfb, 0xf7, 0xd5, - 0x99, 0xd7, 0x1e, 0x4b, 0xf3, 0x07, 0xbf, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x82, 0xff, - 0xd4, 0x07, 0x28, 0x00, 0x00, + // 2873 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x5d, 0x6f, 0x23, 0x57, + 0x35, 0x63, 0xc7, 0x89, 0x7d, 0x6c, 0xe7, 0xe3, 0x6e, 0x16, 0xbc, 0x41, 0xc4, 0xe9, 0xb4, 0xaa, + 0xb6, 0xd0, 0x3a, 0xdd, 0xa5, 0x54, 0xdb, 0x2d, 0x2d, 0xc4, 0xf1, 0x66, 0x9b, 0x76, 0xd3, 0x44, + 0x37, 0xbb, 0x0b, 0x94, 0x0a, 0x75, 0xe2, 0xb9, 0x71, 0x86, 0x8c, 0x67, 0xdc, 0x7b, 0xc7, 0x49, + 0x0d, 0x0f, 0xf4, 0x01, 0x04, 0x48, 0xa8, 0x2a, 0x6f, 0x3c, 0xa1, 0x56, 0xf0, 0x03, 0x10, 0x4f, + 0xbc, 0x83, 0x44, 0x1f, 0x8b, 0x78, 0xa9, 0x04, 0xb2, 0xba, 0xe1, 0x81, 0x47, 0xc4, 0x6b, 0x84, + 0x04, 0xba, 0x1f, 0x33, 0x73, 0xc7, 0x1f, 0x9b, 0xf1, 0xee, 0x52, 0xf1, 0xe6, 0x39, 0xdf, 0xf7, + 0xde, 0x73, 0xce, 0x3d, 0xe7, 0x5c, 0xc3, 0x73, 0x47, 0xd7, 0x58, 0xcd, 0xf1, 0xd7, 0xac, 0x8e, + 0xd3, 0xb6, 0x9a, 0x87, 0x8e, 0x47, 0x68, 0x6f, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0xb6, 0xd6, 0x26, + 0x81, 0xb5, 0x76, 0x7c, 0x65, 0xad, 0x45, 0x3c, 0x42, 0xad, 0x80, 0xd8, 0xb5, 0x0e, 0xf5, 0x03, + 0x1f, 0x3d, 0x21, 0xb9, 0x6a, 0x3a, 0x57, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0x56, 0xe3, 0x5c, 0xb5, + 0xe3, 0x2b, 0xcb, 0xcf, 0xb4, 0x9c, 0xe0, 0xb0, 0xbb, 0x5f, 0x6b, 0xfa, 0xed, 0xb5, 0x96, 0xdf, + 0xf2, 0xd7, 0x04, 0xf3, 0x7e, 0xf7, 0x40, 0x7c, 0x89, 0x0f, 0xf1, 0x4b, 0x0a, 0x5d, 0x5e, 0x1b, + 0x67, 0x0a, 0xed, 0x7a, 0x81, 0xd3, 0x26, 0x83, 0x56, 0x2c, 0x3f, 0x7f, 0x1e, 0x03, 0x6b, 0x1e, + 0x92, 0xb6, 0x35, 0xc8, 0x67, 0xfe, 0x29, 
0x0b, 0xf9, 0xf5, 0xdd, 0xad, 0x9b, 0xd4, 0xef, 0x76, + 0xd0, 0x2a, 0x4c, 0x7b, 0x56, 0x9b, 0x54, 0x8c, 0x55, 0xe3, 0x72, 0xa1, 0x5e, 0xfa, 0xa8, 0x5f, + 0x9d, 0x3a, 0xed, 0x57, 0xa7, 0x5f, 0xb7, 0xda, 0x04, 0x0b, 0x0c, 0x72, 0x21, 0x7f, 0x4c, 0x28, + 0x73, 0x7c, 0x8f, 0x55, 0x32, 0xab, 0xd9, 0xcb, 0xc5, 0xab, 0x2f, 0xd7, 0xd2, 0xac, 0xbf, 0x26, + 0x14, 0xdc, 0x95, 0xac, 0x9b, 0x3e, 0x6d, 0x38, 0xac, 0xe9, 0x1f, 0x13, 0xda, 0xab, 0x2f, 0x28, + 0x2d, 0x79, 0x85, 0x64, 0x38, 0xd2, 0x80, 0x7e, 0x64, 0xc0, 0x42, 0x87, 0x92, 0x03, 0x42, 0x29, + 0xb1, 0x15, 0xbe, 0x92, 0x5d, 0x35, 0x1e, 0x81, 0xda, 0x8a, 0x52, 0xbb, 0xb0, 0x3b, 0x20, 0x1f, + 0x0f, 0x69, 0x44, 0xbf, 0x36, 0x60, 0x99, 0x11, 0x7a, 0x4c, 0xe8, 0xba, 0x6d, 0x53, 0xc2, 0x58, + 0xbd, 0xb7, 0xe1, 0x3a, 0xc4, 0x0b, 0x36, 0xb6, 0x1a, 0x98, 0x55, 0xa6, 0xc5, 0x3e, 0x7c, 0x3d, + 0x9d, 0x41, 0x7b, 0xe3, 0xe4, 0xd4, 0x4d, 0x65, 0xd1, 0xf2, 0x58, 0x12, 0x86, 0xef, 0x63, 0x86, + 0x79, 0x00, 0xa5, 0xf0, 0x20, 0x6f, 0x39, 0x2c, 0x40, 0x77, 0x61, 0xa6, 0xc5, 0x3f, 0x58, 0xc5, + 0x10, 0x06, 0xd6, 0xd2, 0x19, 0x18, 0xca, 0xa8, 0xcf, 0x29, 0x7b, 0x66, 0xc4, 0x27, 0xc3, 0x4a, + 0x9a, 0xf9, 0xb3, 0x69, 0x28, 0xae, 0xef, 0x6e, 0x61, 0xc2, 0xfc, 0x2e, 0x6d, 0x92, 0x14, 0x4e, + 0x73, 0x0d, 0x4a, 0xcc, 0xf1, 0x5a, 0x5d, 0xd7, 0xa2, 0x1c, 0x5a, 0x99, 0x11, 0x94, 0x4b, 0x8a, + 0xb2, 0xb4, 0xa7, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xc7, 0x6a, 0x12, 0xbb, + 0x92, 0x59, 0x35, 0x2e, 0xe7, 0xeb, 0x48, 0xf1, 0xc1, 0xeb, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x71, + 0xc8, 0x09, 0x4b, 0x2b, 0x79, 0xa1, 0xa6, 0xac, 0xc8, 0x73, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x53, + 0x30, 0xab, 0xbc, 0xac, 0x52, 0x10, 0x64, 0xf3, 0x8a, 0x6c, 0x36, 0x74, 0x83, 0x10, 0xcf, 0xd7, + 0x77, 0xe4, 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x39, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, + 0xdc, 0x31, 0xa1, 0xfb, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0xf5, + 0x02, 0x37, 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x01, 0xb0, 0x43, 0x9f, 0x06, 0x62, 0x79, 0x95, + 0xdc, 0x6a, 0xf6, 0x72, 0xa1, 0x3e, 0xc7, 0xd7, 0xbb, 0x17, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x9b, + 0x56, 0x40, 0x5a, 0x3e, 0x75, 0x08, 0xab, 0xcc, 0xc6, 0xf4, 0x1b, 0x11, 0x14, 0x6b, 0x14, 0xe8, + 0x55, 0x40, 0x2c, 0xf0, 0xa9, 0xd5, 0x22, 0x6a, 0xa9, 0xaf, 0x58, 0xec, 0xb0, 0x02, 0x62, 0x75, + 0xcb, 0x6a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x67, 0xc0, 0xbc, 0xe6, 0x0b, + 0xc2, 0xef, 0xae, 0x41, 0xa9, 0xa5, 0x45, 0x9d, 0xf2, 0x8b, 0xe8, 0xb4, 0xf5, 0x88, 0xc4, 0x09, + 0x4a, 0x44, 0xa0, 0x40, 0x95, 0xa4, 0x30, 0xbb, 0x5c, 0x49, 0xed, 0xb4, 0xa1, 0x0d, 0xb1, 0x26, + 0x0d, 0xc8, 0x70, 0x2c, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0xcc, 0x37, 0xe8, 0xb2, 0x96, 0xd3, + 0x0c, 0xb1, 0x7d, 0xa5, 0x31, 0xf9, 0xe8, 0x9c, 0x44, 0x90, 0xf9, 0xbf, 0x48, 0x04, 0xd7, 0xf3, + 0xbf, 0xfc, 0xa0, 0x3a, 0xf5, 0xee, 0xdf, 0x56, 0xa7, 0xcc, 0x5f, 0x18, 0x50, 0x5a, 0xef, 0x74, + 0xdc, 0xde, 0x4e, 0x27, 0x10, 0x0b, 0x30, 0x61, 0xc6, 0xa6, 0x3d, 0xdc, 0xf5, 0xd4, 0x42, 0x81, + 0xc7, 0x77, 0x43, 0x40, 0xb0, 0xc2, 0xf0, 0xf8, 0x39, 0xf0, 0x69, 0x93, 0xa8, 0x70, 0x8b, 0xe2, + 0x67, 0x93, 0x03, 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x70, 0x88, 0x6b, 0x6f, 0x5b, 0x9e, 0xd5, 0x22, + 0x54, 0x05, 0x47, 0xb4, 0xf5, 0x9b, 0x1a, 0x0e, 0x27, 0x28, 0xcd, 0xff, 0x64, 0xa0, 0xb0, 0xe1, + 0x7b, 0xb6, 0x13, 0xa8, 0xe0, 0x0a, 0x7a, 0x9d, 0xa1, 0xe4, 0x71, 0xbb, 0xd7, 0x21, 0x58, 0x60, + 0xd0, 0x0b, 0x30, 0xc3, 0x02, 0x2b, 0xe8, 0x32, 0x61, 0x4f, 0xa1, 0xfe, 0x58, 0x98, 0x96, 0xf6, + 0x04, 0xf4, 0xac, 0x5f, 0x9d, 0x8f, 0xc4, 0x49, 0x10, 0x56, 0x0c, 
0xdc, 0xd3, 0xfd, 0x7d, 0xb1, + 0x51, 0xf6, 0x4d, 0x79, 0xed, 0x85, 0xf7, 0x47, 0x36, 0xf6, 0xf4, 0x9d, 0x21, 0x0a, 0x3c, 0x82, + 0x0b, 0x1d, 0x03, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x9b, 0xa8, + 0x80, 0xff, 0x52, 0xba, 0x13, 0xe7, 0x1c, 0xb1, 0xde, 0x5b, 0x43, 0xd2, 0xf0, 0x08, 0x0d, 0xe8, + 0x49, 0x98, 0xa1, 0xc4, 0x62, 0xbe, 0x57, 0xc9, 0x89, 0xe5, 0x47, 0x59, 0x19, 0x0b, 0x28, 0x56, + 0x58, 0x9e, 0xd0, 0xda, 0x84, 0x31, 0xab, 0x15, 0xa6, 0xd7, 0x28, 0xa1, 0x6d, 0x4b, 0x30, 0x0e, + 0xf1, 0xe6, 0x6f, 0x0d, 0x28, 0x6f, 0x50, 0x62, 0x05, 0x64, 0x12, 0xb7, 0x78, 0xe0, 0x13, 0x47, + 0xeb, 0x30, 0x2f, 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0x98, 0x16, 0xcc, 0x9f, 0x57, 0xcc, + 0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x85, 0x72, 0x83, 0xb8, 0x24, 0x36, 0x79, + 0x13, 0x50, 0x8b, 0x5a, 0x4d, 0xb2, 0x4b, 0xa8, 0xe3, 0xdb, 0x7b, 0xa4, 0xe9, 0x7b, 0x36, 0x13, + 0x6e, 0x94, 0xad, 0x7f, 0x8e, 0xef, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x5c, 0x28, 0x77, + 0xa8, 0xf8, 0x2d, 0xf6, 0x5c, 0x7a, 0x59, 0xf1, 0xea, 0x57, 0xd2, 0x1d, 0xe9, 0xae, 0xce, 0x5a, + 0x5f, 0x3c, 0xed, 0x57, 0xcb, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x9f, 0x76, 0x0e, + 0x2d, 0xaf, 0x41, 0x3a, 0xc4, 0xb3, 0x89, 0x17, 0x30, 0xb1, 0x91, 0xf9, 0xfa, 0x12, 0xaf, 0x45, + 0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x01, 0x8b, 0x1d, 0xea, 0x77, 0xac, 0x96, 0xd8, 0x98, + 0x5d, 0xdf, 0x75, 0x9a, 0x3d, 0xb5, 0x9d, 0x4f, 0x9f, 0xf6, 0xab, 0x8b, 0xbb, 0x83, 0xc8, 0xb3, + 0x7e, 0xf5, 0x82, 0xd8, 0x3a, 0x0e, 0x89, 0x91, 0x78, 0x58, 0x8c, 0xe6, 0x06, 0xb9, 0x71, 0x6e, + 0x60, 0x6e, 0x41, 0xbe, 0xd1, 0x55, 0x31, 0xf1, 0x12, 0xe4, 0x6d, 0xf5, 0x5b, 0xed, 0x7c, 0x18, + 0x9c, 0x11, 0xcd, 0x59, 0xbf, 0x5a, 0xe6, 0xe5, 0x67, 0x2d, 0x04, 0xe0, 0x88, 0xc5, 0xfc, 0x8d, + 0x01, 0x15, 0x71, 0xf2, 0x7b, 0xc4, 0x25, 0xcd, 0xc0, 0xa7, 0x98, 0xbc, 0xdd, 0x75, 0x28, 0x69, + 0x13, 0x2f, 0x40, 0x5f, 0x84, 0xec, 0x11, 0xe9, 0xa9, 0xbc, 0x50, 0x54, 0x62, 0xb3, 0xaf, 0x91, + 0x1e, 0xe6, 0x70, 0x74, 0x03, 0xf2, 0x7e, 0x87, 0xc7, 0xa6, 0x4f, 0x55, 0x5e, 0x78, 0x2a, 0x54, + 0xbd, 0xa3, 0xe0, 0x67, 0xfd, 0xea, 0xc5, 0x84, 0xf8, 0x10, 0x81, 0x23, 0x56, 0xbe, 0xe2, 0x63, + 0xcb, 0xed, 0x12, 0x7e, 0x0a, 0xd1, 0x8a, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0x49, 0xc8, 0x0b, + 0x31, 0xec, 0xee, 0x15, 0xb4, 0x00, 0x59, 0x6c, 0x9d, 0x08, 0xab, 0x4a, 0x98, 0xff, 0xd4, 0x92, + 0xed, 0x0e, 0xc0, 0x4d, 0x12, 0x84, 0xfe, 0xb9, 0x0e, 0xf3, 0xe1, 0x8d, 0x93, 0xbc, 0x08, 0x23, + 0xa7, 0xc7, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0xdf, 0x84, 0x82, 0xb8, 0x2c, 0x79, 0xa5, 0x11, 0x57, + 0x35, 0xc6, 0x7d, 0xaa, 0x9a, 0xb0, 0x54, 0xc9, 0x8c, 0x2b, 0x55, 0x34, 0x73, 0x5d, 0x28, 0x4b, + 0xde, 0xb0, 0x8e, 0x4b, 0xa5, 0xe1, 0x69, 0xc8, 0x87, 0x66, 0x2a, 0x2d, 0x51, 0xfd, 0x1e, 0x0a, + 0xc2, 0x11, 0x85, 0xa6, 0xed, 0x10, 0x12, 0x17, 0x7f, 0x3a, 0x65, 0x5a, 0x91, 0x96, 0xb9, 0x7f, + 0x91, 0xa6, 0x69, 0xfa, 0x21, 0x54, 0xc6, 0x15, 0xfd, 0x0f, 0x51, 0x9a, 0xa4, 0x37, 0xc5, 0x7c, + 0xcf, 0x80, 0x05, 0x5d, 0x52, 0xfa, 0xe3, 0x4b, 0xaf, 0xe4, 0xfc, 0xa2, 0x54, 0xdb, 0x91, 0x5f, + 0x19, 0xb0, 0x94, 0x58, 0xda, 0x44, 0x27, 0x3e, 0x81, 0x51, 0xba, 0x73, 0x64, 0x27, 0x70, 0x8e, + 0xbf, 0x64, 0xa0, 0x7c, 0xcb, 0xda, 0x27, 0x6e, 0x18, 0xa9, 0xe8, 0x07, 0x50, 0x6c, 0x5b, 0x41, + 0xf3, 0x50, 0x40, 0xc3, 0x06, 0xa6, 0x91, 0x2e, 0x27, 0x27, 0x24, 0xd5, 0xb6, 0x63, 0x31, 0x37, + 0xbc, 0x80, 0xf6, 0xea, 0x17, 0x94, 0x49, 0x45, 0x0d, 0x83, 0x75, 0x6d, 0xa2, 0xeb, 0x14, 0xdf, + 0x37, 0xde, 0xe9, 0xf0, 0xea, 0x6a, 0xf2, 0x66, 0x37, 0x61, 0x82, 0x96, 0xd5, 0xe2, 0xae, 
0x73, + 0x7b, 0x40, 0x3e, 0x1e, 0xd2, 0xb8, 0xfc, 0x32, 0x2c, 0x0c, 0x1a, 0xcf, 0xf3, 0x4f, 0x94, 0x15, + 0x65, 0x22, 0x5c, 0x82, 0x9c, 0xc8, 0x53, 0xf2, 0x70, 0xb0, 0xfc, 0xb8, 0x9e, 0xb9, 0x66, 0x88, + 0xf4, 0x3a, 0xce, 0x90, 0x47, 0x94, 0x5e, 0x13, 0xe2, 0x1f, 0x30, 0xbd, 0xfe, 0xde, 0x80, 0x69, + 0xd1, 0x37, 0xbc, 0x09, 0x79, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0x1d, 0x2b, 0xe7, + 0xde, 0x26, 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xe7, 0x04, 0xa4, 0x1d, + 0x1e, 0xe4, 0x33, 0x63, 0x45, 0xab, 0x79, 0x49, 0x0d, 0x5b, 0x27, 0x37, 0xde, 0x09, 0x88, 0xc7, + 0x0f, 0x23, 0x0e, 0x8d, 0x2d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x54, 0x71, 0xe7, + 0x67, 0xc4, 0x3d, 0xb8, 0xe5, 0x78, 0x47, 0x6a, 0x5b, 0x23, 0x73, 0xf6, 0x14, 0x1c, 0x47, 0x14, + 0xa3, 0xae, 0x87, 0xcc, 0x64, 0xd7, 0x03, 0x57, 0xd8, 0xf4, 0xbd, 0xc0, 0xf1, 0xba, 0x43, 0xd1, + 0xb6, 0xa1, 0xe0, 0x38, 0xa2, 0xe0, 0xf5, 0x12, 0x25, 0x6d, 0xcb, 0xf1, 0x1c, 0xaf, 0xc5, 0x17, + 0xb1, 0xe1, 0x77, 0xbd, 0x40, 0x14, 0x0e, 0xaa, 0x5e, 0xc2, 0x43, 0x58, 0x3c, 0x82, 0xc3, 0xfc, + 0xf7, 0x34, 0x14, 0xf9, 0x9a, 0xc3, 0x7b, 0xee, 0x45, 0x28, 0xbb, 0xba, 0x17, 0xa8, 0xb5, 0x5f, + 0x54, 0xa6, 0x24, 0xe3, 0x1a, 0x27, 0x69, 0x39, 0xf3, 0x81, 0x7e, 0x43, 0xab, 0x3d, 0x88, 0x98, + 0x93, 0xd5, 0x41, 0x92, 0x96, 0x67, 0xaf, 0x13, 0x1e, 0x1f, 0xaa, 0x80, 0x8a, 0x8e, 0xe8, 0x9b, + 0x1c, 0x88, 0x25, 0x0e, 0x6d, 0xc3, 0x05, 0xcb, 0x75, 0xfd, 0x13, 0x01, 0xac, 0xfb, 0xfe, 0x51, + 0xdb, 0xa2, 0x47, 0x4c, 0xf4, 0xfc, 0xf9, 0xfa, 0x17, 0x14, 0xcb, 0x85, 0xf5, 0x61, 0x12, 0x3c, + 0x8a, 0x6f, 0xd4, 0xb1, 0x4d, 0x4f, 0x78, 0x6c, 0x87, 0xb0, 0x34, 0x00, 0x12, 0x51, 0xae, 0x1a, + 0xf0, 0xe7, 0x94, 0x9c, 0x25, 0x3c, 0x82, 0xe6, 0x6c, 0x0c, 0x1c, 0x8f, 0x94, 0x88, 0xae, 0xc3, + 0x1c, 0xf7, 0x64, 0xbf, 0x1b, 0x84, 0xe5, 0x71, 0x4e, 0x1c, 0x37, 0x3a, 0xed, 0x57, 0xe7, 0x6e, + 0x27, 0x30, 0x78, 0x80, 0x92, 0x6f, 0xae, 0xeb, 0xb4, 0x9d, 0xa0, 0x32, 0x2b, 0x58, 0xa2, 0xcd, + 0xbd, 0xc5, 0x81, 0x58, 0xe2, 0x12, 0x1e, 0x98, 0x3f, 0xd7, 0x03, 0x37, 0x60, 0x91, 0x11, 0xcf, + 0xde, 0xf2, 0x9c, 0xc0, 0xb1, 0xdc, 0x1b, 0xc7, 0xa2, 0xf8, 0x2d, 0x8a, 0x83, 0xb8, 0xc8, 0x2b, + 0xd7, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x9c, 0x05, 0x24, 0xfb, 0x0a, 0x5b, 0x16, 0x65, + 0x32, 0x2f, 0xf2, 0xee, 0x47, 0xf5, 0x25, 0xc6, 0x40, 0xf7, 0xa3, 0x5a, 0x92, 0x10, 0x8f, 0xb6, + 0xa1, 0x20, 0xf3, 0x53, 0x1c, 0x73, 0x6b, 0x8a, 0xb8, 0xb0, 0x13, 0x22, 0xce, 0xfa, 0xd5, 0xe5, + 0x84, 0x9a, 0x08, 0x23, 0x3a, 0xd3, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1c, 0x7d, 0x36, 0x59, + 0x88, 0x27, 0x54, 0xf1, 0x94, 0x01, 0x6b, 0x54, 0xe8, 0x15, 0x98, 0x0e, 0x1e, 0xac, 0x7b, 0xcc, + 0x8b, 0xe6, 0x98, 0xf7, 0x8a, 0x42, 0x02, 0xd7, 0x2e, 0x82, 0x82, 0x71, 0xb3, 0x54, 0xe3, 0x17, + 0x69, 0xdf, 0x8c, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xfc, 0x81, 0xaa, 0x67, 0xc5, 0xe9, 0xa6, + 0xce, 0xb3, 0x61, 0x15, 0x2c, 0xc7, 0x23, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x91, 0x75, + 0xf7, 0xa3, 0x12, 0x40, 0xba, 0x44, 0x74, 0xdf, 0xee, 0xc5, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x43, + 0x61, 0xdb, 0x69, 0x52, 0x5f, 0xf4, 0xbb, 0x4f, 0xc1, 0x2c, 0x4b, 0x34, 0x73, 0xd1, 0x49, 0x86, + 0xae, 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x96, 0x2d, 0x17, 0xfb, 0xe8, 0xeb, 0x1c, + 0x88, 0x25, 0xee, 0xfa, 0x12, 0xaf, 0x32, 0x7e, 0xfa, 0x61, 0x75, 0xea, 0xfd, 0x0f, 0xab, 0x53, + 0x1f, 0x7c, 0xa8, 0x2a, 0x8e, 0x3f, 0x00, 0xc0, 0xce, 0xfe, 0xf7, 0x48, 0x53, 0xe6, 0xee, 0x54, + 0x23, 0xcc, 0x70, 0x72, 0x2e, 0x46, 0x98, 0x99, 0x81, 0xca, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a, + 0x83, 0x42, 0x34, 
0x9c, 0x54, 0xfe, 0xb1, 0x18, 0xfa, 0x5b, 0x34, 0xc1, 0xc4, 0x31, 0x4d, 0xe2, + 0x22, 0x99, 0x3e, 0xf7, 0x22, 0xa9, 0x43, 0xb6, 0xeb, 0xd8, 0x6a, 0x38, 0xf0, 0x6c, 0x78, 0x91, + 0xdf, 0xd9, 0x6a, 0x9c, 0xf5, 0xab, 0x8f, 0x8d, 0x7b, 0x13, 0x08, 0x7a, 0x1d, 0xc2, 0x6a, 0x77, + 0xb6, 0x1a, 0x98, 0x33, 0x8f, 0xca, 0x6a, 0x33, 0x13, 0x66, 0xb5, 0xab, 0x00, 0xad, 0x78, 0xc4, + 0x22, 0x93, 0x46, 0xe4, 0x88, 0xda, 0x68, 0x45, 0xa3, 0x42, 0x0c, 0x16, 0x9b, 0x94, 0x58, 0xe1, + 0xa8, 0x83, 0x05, 0x56, 0x5b, 0x0e, 0x6d, 0x27, 0x8b, 0x89, 0x4b, 0x4a, 0xcd, 0xe2, 0xc6, 0xa0, + 0x30, 0x3c, 0x2c, 0x1f, 0xf9, 0xb0, 0x68, 0xab, 0x6e, 0x38, 0x56, 0x5a, 0x98, 0x58, 0xa9, 0xc8, + 0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x2e, 0x2c, 0x87, 0xc0, 0xe1, 0x91, 0x84, 0xc8, + 0xfa, 0xd9, 0xfa, 0xca, 0x69, 0xbf, 0xba, 0xdc, 0x18, 0x4b, 0x85, 0xef, 0x23, 0x01, 0xd9, 0x30, + 0xe3, 0xca, 0x2a, 0xb9, 0x28, 0x2a, 0x9b, 0xaf, 0xa5, 0x5b, 0x45, 0xec, 0xfd, 0x35, 0xbd, 0x3a, + 0x8e, 0xc6, 0x4b, 0xaa, 0x30, 0x56, 0xb2, 0xd1, 0x3b, 0x50, 0xb4, 0x3c, 0xcf, 0x0f, 0x2c, 0x39, + 0x24, 0x29, 0x09, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f, 0x65, 0x0c, 0x54, 0xe3, 0x1a, 0x06, 0xeb, + 0xaa, 0xd0, 0x09, 0xcc, 0xfb, 0x27, 0x1e, 0xa1, 0x98, 0x1c, 0x10, 0x4a, 0xbc, 0x26, 0x61, 0x95, + 0xb2, 0xd0, 0xfe, 0x5c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x07, 0xb5, 0xa0, + 0x1a, 0xcf, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe6, 0xe2, 0xb9, 0xfa, 0x66, 0x04, + 0xc5, 0x1a, 0x05, 0xea, 0x42, 0xb9, 0xad, 0x5f, 0x19, 0x95, 0x45, 0x61, 0xe6, 0xb5, 0x74, 0x66, + 0x0e, 0x5f, 0x6a, 0x71, 0x19, 0x94, 0xc0, 0xe1, 0xa4, 0x96, 0xe5, 0x17, 0xa0, 0xf8, 0x80, 0x1d, + 0x02, 0xef, 0x30, 0x06, 0x0f, 0x64, 0xa2, 0x0e, 0xe3, 0x8f, 0x19, 0x98, 0x4b, 0x6e, 0xe3, 0xc0, + 0x75, 0x98, 0x4b, 0x75, 0x1d, 0x86, 0xbd, 0xac, 0x31, 0xf6, 0x81, 0x25, 0xcc, 0xcf, 0xd9, 0xb1, + 0xf9, 0x59, 0xa5, 0xc1, 0xe9, 0x87, 0x49, 0x83, 0x35, 0x00, 0x5e, 0xac, 0x50, 0xdf, 0x75, 0x09, + 0x15, 0x19, 0x30, 0xaf, 0x1e, 0x52, 0x22, 0x28, 0xd6, 0x28, 0x78, 0x49, 0xbd, 0xef, 0xfa, 0xcd, + 0x23, 0xb1, 0x05, 0x61, 0xf4, 0x8a, 0xdc, 0x97, 0x97, 0x25, 0x75, 0x7d, 0x08, 0x8b, 0x47, 0x70, + 0x98, 0x3d, 0xb8, 0xb8, 0x6b, 0x51, 0x5e, 0xe4, 0xc4, 0x91, 0x22, 0x7a, 0x96, 0xb7, 0x86, 0x3a, + 0xa2, 0x67, 0x27, 0x8d, 0xb8, 0x78, 0xf3, 0x63, 0x58, 0xdc, 0x15, 0x99, 0x7f, 0x35, 0xe0, 0xd2, + 0x48, 0xdd, 0x9f, 0x41, 0x47, 0xf6, 0x56, 0xb2, 0x23, 0x7b, 0x31, 0xe5, 0xc4, 0x75, 0x94, 0xb5, + 0x63, 0xfa, 0xb3, 0x59, 0xc8, 0xed, 0xf2, 0x4a, 0xd8, 0xfc, 0xd8, 0x80, 0x92, 0xf8, 0x35, 0xc9, + 0xc0, 0xbb, 0x9a, 0x7c, 0x07, 0x29, 0x3c, 0xba, 0x37, 0x90, 0x47, 0x31, 0x11, 0x7f, 0xcf, 0x80, + 0xe4, 0xa8, 0x19, 0xbd, 0x2c, 0x43, 0xc0, 0x88, 0x66, 0xc1, 0x13, 0xba, 0xff, 0x4b, 0xe3, 0x5a, + 0xd2, 0x0b, 0xa9, 0xa6, 0x95, 0x4f, 0x43, 0x01, 0xfb, 0x7e, 0xb0, 0x6b, 0x05, 0x87, 0x8c, 0xef, + 0x5d, 0x87, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, 0x37, 0xe0, 0xd2, + 0xd8, 0xe7, 0x2d, 0x9e, 0x45, 0x9a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8, + 0x78, 0x2f, 0x99, 0x78, 0x13, 0x1b, 0xec, 0x25, 0x13, 0xda, 0x70, 0x92, 0xd6, 0xfc, 0x67, 0x06, + 0xd4, 0x7b, 0xd2, 0xff, 0xd8, 0xe9, 0x9f, 0x1c, 0x78, 0xcd, 0x9a, 0x4b, 0xbe, 0x66, 0x45, 0x4f, + 0x57, 0xda, 0x73, 0x4e, 0xf6, 0xfe, 0xcf, 0x39, 0xe8, 0xf9, 0xe8, 0x85, 0x48, 0xfa, 0xd0, 0x4a, + 0xf2, 0x85, 0xe8, 0xac, 0x5f, 0x2d, 0x29, 0xe1, 0xc9, 0x17, 0xa3, 0x37, 0x60, 0xd6, 0x26, 0x81, + 0xe5, 0xb8, 0xb2, 0x2f, 0x4c, 0xfd, 0xe6, 0x21, 0x85, 0x35, 0x24, 0x6b, 0xbd, 0xc8, 0x6d, 0x52, + 0x1f, 0x38, 0x14, 0xc8, 0x13, 0x76, 0xd3, 
0xb7, 0x65, 0x47, 0x92, 0x8b, 0x13, 0xf6, 0x86, 0x6f, + 0x13, 0x2c, 0x30, 0xe6, 0xfb, 0x06, 0x14, 0xa5, 0xa4, 0x0d, 0xab, 0xcb, 0x08, 0xba, 0x12, 0xad, + 0x42, 0x1e, 0xf7, 0x25, 0xfd, 0x29, 0xf0, 0xac, 0x5f, 0x2d, 0x08, 0x32, 0xd1, 0xcc, 0x8c, 0x78, + 0xf2, 0xca, 0x9c, 0xb3, 0x47, 0x8f, 0x43, 0x4e, 0x04, 0x90, 0xda, 0xcc, 0xf8, 0x4d, 0x93, 0x03, + 0xb1, 0xc4, 0x99, 0x9f, 0x66, 0xa0, 0x9c, 0x58, 0x5c, 0x8a, 0xbe, 0x20, 0x1a, 0xa1, 0x66, 0x52, + 0x8c, 0xe5, 0xc7, 0xff, 0x83, 0x40, 0x5d, 0x5f, 0x33, 0x0f, 0x73, 0x7d, 0x7d, 0x1b, 0x66, 0x9a, + 0x7c, 0x8f, 0xc2, 0x3f, 0xa4, 0x5c, 0x99, 0xe4, 0x38, 0xc5, 0xee, 0xc6, 0xde, 0x28, 0x3e, 0x19, + 0x56, 0x02, 0xd1, 0x4d, 0x58, 0xa4, 0x24, 0xa0, 0xbd, 0xf5, 0x83, 0x80, 0x50, 0x7d, 0x98, 0x90, + 0x8b, 0xab, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc, 0x7d, 0x28, 0xdd, 0xb6, 0xf6, 0xdd, 0xe8, + 0x15, 0x0f, 0x43, 0xd9, 0xf1, 0x9a, 0x6e, 0xd7, 0x26, 0x32, 0xa1, 0x87, 0xd9, 0x2b, 0x0c, 0xda, + 0x2d, 0x1d, 0x79, 0xd6, 0xaf, 0x5e, 0x48, 0x00, 0xe4, 0xb3, 0x15, 0x4e, 0x8a, 0x30, 0x5d, 0x98, + 0xfe, 0x0c, 0x3b, 0xc9, 0xef, 0x40, 0x21, 0xae, 0xf5, 0x1f, 0xb1, 0x4a, 0xf3, 0x2d, 0xc8, 0x73, + 0x8f, 0x0f, 0x7b, 0xd4, 0x73, 0xaa, 0xa4, 0x64, 0xed, 0x95, 0x49, 0x53, 0x7b, 0x89, 0xb7, 0xe0, + 0x3b, 0x1d, 0xfb, 0x21, 0xdf, 0x82, 0x33, 0x0f, 0x73, 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x82, + 0xfc, 0xbf, 0x0c, 0xbf, 0x64, 0x64, 0x01, 0xa1, 0x5d, 0x32, 0xfa, 0xfd, 0xaf, 0xbd, 0x30, 0xfc, + 0xd8, 0x00, 0x10, 0xa3, 0x3c, 0x31, 0x46, 0x4a, 0xf1, 0xaf, 0x83, 0x3b, 0x30, 0xe3, 0x4b, 0x8f, + 0x94, 0xef, 0xc1, 0x13, 0xce, 0x8b, 0xa3, 0x40, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xfa, 0xab, 0x1f, + 0xdd, 0x5b, 0x99, 0xfa, 0xf8, 0xde, 0xca, 0xd4, 0x27, 0xf7, 0x56, 0xa6, 0xde, 0x3d, 0x5d, 0x31, + 0x3e, 0x3a, 0x5d, 0x31, 0x3e, 0x3e, 0x5d, 0x31, 0x3e, 0x39, 0x5d, 0x31, 0x3e, 0x3d, 0x5d, 0x31, + 0xde, 0xff, 0xfb, 0xca, 0xd4, 0x1b, 0x4f, 0xa4, 0xf9, 0x1f, 0xe2, 0x7f, 0x03, 0x00, 0x00, 0xff, + 0xff, 0xd3, 0xee, 0xe4, 0x1c, 0xae, 0x28, 0x00, 0x00, } func (m *APIGroup) Marshal() (dAtA []byte, err error) { @@ -2025,6 +2055,48 @@ func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *FieldSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *FieldsV1) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3714,6 +3786,25 @@ func (m *Duration) Size() (n int) { return n } +func (m *FieldSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l 
+ sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *FieldsV1) Size() (n int) { if m == nil { return 0 @@ -4429,6 +4520,18 @@ func (this *Duration) String() string { }, "") return s } +func (this *FieldSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} func (this *GetOptions) String() string { if this == nil { return "nil" @@ -6443,6 +6546,152 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } return nil } +func (m *FieldSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = FieldSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = 
append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FieldsV1) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 2b95700f..18dd0b06 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -324,6 +324,25 @@ message Duration { optional int64 duration = 1; } +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message FieldSelectorRequirement { + // key is the field selector key that the requirement applies to. + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + // The list of operators may grow in the future. + optional string operator = 2; + + // values is an array of string values. + // If the operator is In or NotIn, the values array must be non-empty. + // If the operator is Exists or DoesNotExist, the values array must be empty. + // +optional + // +listType=atomic + repeated string values = 3; +} + // FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. // // Each key is either a '.' representing the field itself, and will always map to an empty set, @@ -460,7 +479,7 @@ message List { optional ListMeta metadata = 1; // List of objects - repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; } // ListMeta describes metadata that synthetic resources must have, including lists and @@ -1209,6 +1228,6 @@ message WatchEvent { // * If Type is Deleted: the state of the object immediately before deletion. // * If Type is Error: *Status is recommended; other types may make sense // depending on context. 
- optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index 592dcb8a..c748071e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -24,8 +24,10 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" + utiljson "k8s.io/apimachinery/pkg/util/json" ) // LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements @@ -280,13 +282,20 @@ func (f FieldsV1) MarshalJSON() ([]byte, error) { if f.Raw == nil { return []byte("null"), nil } + if f.getContentType() == fieldsV1InvalidOrValidCBORObject { + var u map[string]interface{} + if err := cbor.Unmarshal(f.Raw, &u); err != nil { + return nil, fmt.Errorf("metav1.FieldsV1 cbor invalid: %w", err) + } + return utiljson.Marshal(u) + } return f.Raw, nil } // UnmarshalJSON implements json.Unmarshaler func (f *FieldsV1) UnmarshalJSON(b []byte) error { if f == nil { - return errors.New("metav1.Fields: UnmarshalJSON on nil pointer") + return errors.New("metav1.FieldsV1: UnmarshalJSON on nil pointer") } if !bytes.Equal(b, []byte("null")) { f.Raw = append(f.Raw[0:0], b...) @@ -296,3 +305,75 @@ func (f *FieldsV1) UnmarshalJSON(b []byte) error { var _ json.Marshaler = FieldsV1{} var _ json.Unmarshaler = &FieldsV1{} + +func (f FieldsV1) MarshalCBOR() ([]byte, error) { + if f.Raw == nil { + return cbor.Marshal(nil) + } + if f.getContentType() == fieldsV1InvalidOrValidJSONObject { + var u map[string]interface{} + if err := utiljson.Unmarshal(f.Raw, &u); err != nil { + return nil, fmt.Errorf("metav1.FieldsV1 json invalid: %w", err) + } + return cbor.Marshal(u) + } + return f.Raw, nil +} + +var cborNull = []byte{0xf6} + +func (f *FieldsV1) UnmarshalCBOR(b []byte) error { + if f == nil { + return errors.New("metav1.FieldsV1: UnmarshalCBOR on nil pointer") + } + if !bytes.Equal(b, cborNull) { + f.Raw = append(f.Raw[0:0], b...) + } + return nil +} + +const ( + // fieldsV1InvalidOrEmpty indicates that a FieldsV1 either contains no raw bytes or its raw + // bytes don't represent an allowable value in any supported encoding. + fieldsV1InvalidOrEmpty = iota + + // fieldsV1InvalidOrValidJSONObject indicates that a FieldsV1 either contains raw bytes that + // are a valid JSON encoding of an allowable value or don't represent an allowable value in + // any supported encoding. + fieldsV1InvalidOrValidJSONObject + + // fieldsV1InvalidOrValidCBORObject indicates that a FieldsV1 either contains raw bytes that + // are a valid CBOR encoding of an allowable value or don't represent an allowable value in + // any supported encoding. + fieldsV1InvalidOrValidCBORObject +) + +// getContentType returns one of fieldsV1InvalidOrEmpty, fieldsV1InvalidOrValidJSONObject, +// fieldsV1InvalidOrValidCBORObject based on the value of Raw. +// +// Raw can be encoded in JSON or CBOR and is only valid if it is empty, null, or an object (map) +// value. It is invalid if it contains a JSON string, number, boolean, or array. If Raw is nonempty +// and represents an allowable value, then the initial byte unambiguously distinguishes a +// JSON-encoded value from a CBOR-encoded value.
+// +// A valid JSON-encoded value can begin with any of the four JSON whitespace characters, the first +// character 'n' of null, or '{' (0x09, 0x0a, 0x0d, 0x20, 0x6e, or 0x7b, respectively). A valid +// CBOR-encoded value can begin with the null simple value, an initial byte with major type "map", +// or, if a tag-enclosed map, an initial byte with major type "tag" (0xf6, 0xa0...0xbf, or +// 0xc6...0xdb). The two sets of valid initial bytes don't intersect. +func (f FieldsV1) getContentType() int { + if len(f.Raw) > 0 { + p := f.Raw[0] + switch p { + case 'n', '{', '\t', '\r', '\n', ' ': + return fieldsV1InvalidOrValidJSONObject + case 0xf6: // null + return fieldsV1InvalidOrValidCBORObject + default: + if p >= 0xa0 && p <= 0xbf /* map */ || p >= 0xc6 && p <= 0xdb /* tag */ { + return fieldsV1InvalidOrValidCBORObject + } + } + } + return fieldsV1InvalidOrEmpty +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index 8eb37f43..9f302b3f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -19,6 +19,8 @@ package v1 import ( "encoding/json" "time" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" ) const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" @@ -129,6 +131,25 @@ func (t *MicroTime) UnmarshalJSON(b []byte) error { return nil } +func (t *MicroTime) UnmarshalCBOR(b []byte) error { + var s *string + if err := cbor.Unmarshal(b, &s); err != nil { + return err + } + if s == nil { + t.Time = time.Time{} + return nil + } + + parsed, err := time.Parse(RFC3339Micro, *s) + if err != nil { + return err + } + + t.Time = parsed.Local() + return nil +} + // UnmarshalQueryParameter converts from a URL query parameter value to an object func (t *MicroTime) UnmarshalQueryParameter(str string) error { if len(str) == 0 { @@ -160,6 +181,13 @@ func (t MicroTime) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(RFC3339Micro)) } +func (t MicroTime) MarshalCBOR() ([]byte, error) { + if t.IsZero() { + return cbor.Marshal(nil) + } + return cbor.Marshal(t.UTC().Format(RFC3339Micro)) +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. 
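// Illustrative round trip for the CBOR hooks added above (a sketch, not part of the
// vendored file; it assumes the usual metav1 and time imports):
//
//	t := metav1.NewMicroTime(time.Unix(0, 0).UTC())
//	b, _ := t.MarshalCBOR()  // encodes the RFC3339Micro string; a zero MicroTime encodes as CBOR null
//	var out metav1.MicroTime
//	_ = out.UnmarshalCBOR(b) // parses RFC3339Micro and stores the local-time equivalent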
// diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 421770d4..0333cfdb 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -19,6 +19,8 @@ package v1 import ( "encoding/json" "time" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" ) // Time is a wrapper around time.Time which supports correct @@ -116,6 +118,25 @@ func (t *Time) UnmarshalJSON(b []byte) error { return nil } +func (t *Time) UnmarshalCBOR(b []byte) error { + var s *string + if err := cbor.Unmarshal(b, &s); err != nil { + return err + } + if s == nil { + t.Time = time.Time{} + return nil + } + + parsed, err := time.Parse(time.RFC3339, *s) + if err != nil { + return err + } + + t.Time = parsed.Local() + return nil +} + // UnmarshalQueryParameter converts from a URL query parameter value to an object func (t *Time) UnmarshalQueryParameter(str string) error { if len(str) == 0 { @@ -151,6 +172,14 @@ func (t Time) MarshalJSON() ([]byte, error) { return buf, nil } +func (t Time) MarshalCBOR() ([]byte, error) { + if t.IsZero() { + return cbor.Marshal(nil) + } + + return cbor.Marshal(t.UTC().Format(time.RFC3339)) +} + // ToUnstructured implements the value.UnstructuredConverter interface. func (t Time) ToUnstructured() interface{} { if t.IsZero() { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 9695ba50..473adb9e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -1278,6 +1278,33 @@ const ( LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" ) +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that +// relates the key and values. +type FieldSelectorRequirement struct { + // key is the field selector key that the requirement applies to. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + // The list of operators may grow in the future. + Operator FieldSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=FieldSelectorOperator"` + // values is an array of string values. + // If the operator is In or NotIn, the values array must be non-empty. + // If the operator is Exists or DoesNotExist, the values array must be empty. + // +optional + // +listType=atomic + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A field selector operator is the set of operators that can be used in a selector requirement. +type FieldSelectorOperator string + +const ( + FieldSelectorOpIn FieldSelectorOperator = "In" + FieldSelectorOpNotIn FieldSelectorOperator = "NotIn" + FieldSelectorOpExists FieldSelectorOperator = "Exists" + FieldSelectorOpDoesNotExist FieldSelectorOperator = "DoesNotExist" +) + // ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource // that the fieldset applies to. 
type ManagedFieldsEntry struct { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index b736e837..1fa37215 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -135,6 +135,17 @@ func (DeleteOptions) SwaggerDoc() map[string]string { return map_DeleteOptions } +var map_FieldSelectorRequirement = map[string]string{ + "": "FieldSelectorRequirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "key is the field selector key that the requirement applies to.", + "operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. The list of operators may grow in the future.", + "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.", +} + +func (FieldSelectorRequirement) SwaggerDoc() map[string]string { + return map_FieldSelectorRequirement +} + var map_FieldsV1 = map[string]string{ "": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go index a0f709ad..3eba5ba5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -32,6 +32,10 @@ import ( type LabelSelectorValidationOptions struct { // Allow invalid label value in selector AllowInvalidLabelValueInSelector bool + + // Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check + // can be performed, as in a *SubjectAccessReview + AllowUnknownOperatorInRequirement bool } // LabelSelectorHasInvalidLabelValue returns true if the given selector contains an invalid label value in a match expression. @@ -79,7 +83,9 @@ func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, opts L allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) } default: - allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + if !opts.AllowUnknownOperatorInRequirement { + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + } } allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) 
if !opts.AllowInvalidLabelValueInSelector { @@ -113,6 +119,39 @@ func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorLi return allErrs } +// FieldSelectorValidationOptions is a struct that can be passed to ValidateFieldSelectorRequirement to record the validation options +type FieldSelectorValidationOptions struct { + // Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check + // can be performed, as in a *SubjectAccessReview + AllowUnknownOperatorInRequirement bool +} + +// ValidateFieldSelectorRequirement validates the requirement according to the opts and returns any validation errors. +func ValidateFieldSelectorRequirement(requirement metav1.FieldSelectorRequirement, opts FieldSelectorValidationOptions, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(requirement.Key) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("key"), "must be specified")) + } + + switch requirement.Operator { + case metav1.FieldSelectorOpIn, metav1.FieldSelectorOpNotIn: + if len(requirement.Values) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) + } + case metav1.FieldSelectorOpExists, metav1.FieldSelectorOpDoesNotExist: + if len(requirement.Values) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) + } + default: + if !opts.AllowUnknownOperatorInRequirement { + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), requirement.Operator, "not a valid selector operator")) + } + } + + return allErrs +} + func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { allErrs := field.ErrorList{} //lint:file-ignore SA1019 Keep validation for deprecated OrphanDependents option until it's being removed diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 7d29c504..90cc54a7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -327,6 +327,27 @@ func (in *Duration) DeepCopy() *Duration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldSelectorRequirement) DeepCopyInto(out *FieldSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorRequirement. +func (in *FieldSelectorRequirement) DeepCopy() *FieldSelectorRequirement { + if in == nil { + return nil + } + out := new(FieldSelectorRequirement) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out.
func (in *FieldsV1) DeepCopyInto(out *FieldsV1) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index d14d4259..fcec5535 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -33,9 +33,9 @@ message PartialObjectMetadataList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; // items contains each of the included items. - repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 5e601424..9e22a005 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -45,6 +45,19 @@ var ( // Requirements is AND of all requirements. type Requirements []Requirement +func (r Requirements) String() string { + var sb strings.Builder + + for i, requirement := range r { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(requirement.String()) + } + + return sb.String() +} + // Selector represents a label selector. type Selector interface { // Matches returns true if this selector matches the given set of labels. @@ -285,6 +298,13 @@ func (r *Requirement) Values() sets.String { return ret } +// ValuesUnsorted returns a copy of requirement values as passed to NewRequirement without sorting. +func (r *Requirement) ValuesUnsorted() []string { + ret := make([]string, 0, len(r.strValues)) + ret = append(ret, r.strValues...) + return ret +} + // Equal checks the equality of requirement. func (r Requirement) Equal(x Requirement) bool { if r.key != x.key { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go index 9056397f..60c000bc 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go @@ -18,16 +18,77 @@ package runtime import ( "bytes" - "encoding/json" "errors" + "fmt" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + "k8s.io/apimachinery/pkg/util/json" ) +// RawExtension intentionally avoids implementing value.UnstructuredConverter for now because the +// signature of ToUnstructured does not allow returning an error value in cases where the conversion +// is not possible (content type is unrecognized or bytes don't match content type). 
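+// Illustrative usage (a sketch, not upstream code; cborBytes stands in for any
+// valid CBOR object): a RawExtension populated from CBOR is transcoded on demand
+// when a JSON client marshals it.
+//
+//	re := runtime.RawExtension{}
+//	_ = re.UnmarshalCBOR(cborBytes) // stores the bytes behind the self-described CBOR tag
+//	j, _ := re.MarshalJSON()        // guessContentType detects CBOR and transcodes via rawToUnstructured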
+func rawToUnstructured(raw []byte, contentType string) (interface{}, error) { + switch contentType { + case ContentTypeJSON: + var u interface{} + if err := json.Unmarshal(raw, &u); err != nil { + return nil, fmt.Errorf("failed to parse RawExtension bytes as JSON: %w", err) + } + return u, nil + case ContentTypeCBOR: + var u interface{} + if err := cbor.Unmarshal(raw, &u); err != nil { + return nil, fmt.Errorf("failed to parse RawExtension bytes as CBOR: %w", err) + } + return u, nil + default: + return nil, fmt.Errorf("cannot convert RawExtension with unrecognized content type to unstructured") + } +} + +func (re RawExtension) guessContentType() string { + switch { + case bytes.HasPrefix(re.Raw, cborSelfDescribed): + return ContentTypeCBOR + case len(re.Raw) > 0: + switch re.Raw[0] { + case '\t', '\r', '\n', ' ', '{', '[', 'n', 't', 'f', '"', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + // Prefixes for the four whitespace characters, objects, arrays, strings, numbers, true, false, and null. + return ContentTypeJSON + } + } + return "" +} + func (re *RawExtension) UnmarshalJSON(in []byte) error { if re == nil { return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer") } - if !bytes.Equal(in, []byte("null")) { - re.Raw = append(re.Raw[0:0], in...) + if bytes.Equal(in, []byte("null")) { + return nil + } + re.Raw = append(re.Raw[0:0], in...) + return nil +} + +var ( + cborNull = []byte{0xf6} + cborSelfDescribed = []byte{0xd9, 0xd9, 0xf7} +) + +func (re *RawExtension) UnmarshalCBOR(in []byte) error { + if re == nil { + return errors.New("runtime.RawExtension: UnmarshalCBOR on nil pointer") + } + if !bytes.Equal(in, cborNull) { + if !bytes.HasPrefix(in, cborSelfDescribed) { + // The self-described CBOR tag doesn't change the interpretation of the data + // item it encloses, but it is useful as a magic number. Its encoding is + // also what is used to implement the CBOR RecognizingDecoder. + re.Raw = append(re.Raw[:0], cborSelfDescribed...) + } + re.Raw = append(re.Raw, in...) } return nil } @@ -46,6 +107,35 @@ func (re RawExtension) MarshalJSON() ([]byte, error) { } return []byte("null"), nil } - // TODO: Check whether ContentType is actually JSON before returning it. - return re.Raw, nil + + contentType := re.guessContentType() + if contentType == ContentTypeJSON { + return re.Raw, nil + } + + u, err := rawToUnstructured(re.Raw, contentType) + if err != nil { + return nil, err + } + return json.Marshal(u) +} + +func (re RawExtension) MarshalCBOR() ([]byte, error) { + if re.Raw == nil { + if re.Object != nil { + return cbor.Marshal(re.Object) + } + return cbor.Marshal(nil) + } + + contentType := re.guessContentType() + if contentType == ContentTypeCBOR { + return re.Raw, nil + } + + u, err := rawToUnstructured(re.Raw, contentType) + if err != nil { + return nil, err + } + return cbor.Marshal(u) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go new file mode 100644 index 00000000..cd78b1df --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package direct provides functions for marshaling and unmarshaling between arbitrary Go values and +// CBOR data, with behavior that is compatible with that of the CBOR serializer. In particular, +// types that implement cbor.Marshaler and cbor.Unmarshaler should use these functions. +package direct + +import ( + "k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes" +) + +func Marshal(src interface{}) ([]byte, error) { + return modes.Encode.Marshal(src) +} + +func Unmarshal(src []byte, dst interface{}) error { + return modes.Decode.Unmarshal(src, dst) +} + +func Diagnose(src []byte) (string, error) { + return modes.Diagnostic.Diagnose(src) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go new file mode 100644 index 00000000..f14cbd6b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go @@ -0,0 +1,65 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "bytes" + "sync" +) + +var buffers = BufferProvider{p: new(sync.Pool)} + +type buffer struct { + bytes.Buffer +} + +type pool interface { + Get() interface{} + Put(interface{}) +} + +type BufferProvider struct { + p pool +} + +func (b *BufferProvider) Get() *buffer { + if buf, ok := b.p.Get().(*buffer); ok { + return buf + } + return &buffer{} +} + +func (b *BufferProvider) Put(buf *buffer) { + if buf.Cap() > 3*1024*1024 /* Default MaxRequestBodyBytes */ { + // Objects in a sync.Pool are assumed to be fungible. This is not a good assumption + // for pools of *bytes.Buffer because a *bytes.Buffer's underlying array grows as + // needed to accommodate writes. In Kubernetes, apiservers tend to encode "small" + // objects very frequently and much larger objects (especially large lists) only + // occasionally. Under steady load, pooled buffers tend to be borrowed frequently + // enough to prevent them from being released. Over time, each buffer is used to + // encode a large object and its capacity increases accordingly. The result is that + // practically all buffers in the pool retain much more capacity than needed to + // encode most objects. + + // As a basic mitigation for the worst case, buffers with more capacity than the + // default max request body size are never returned to the pool. + // TODO: Optimize for higher buffer utilization. 
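+ // Callers pair Get with a deferred Put (this is the pattern EncMode.Marshal
+ // uses below); a buffer that once grew past the limit is dropped here and
+ // left to the garbage collector instead of pinning its capacity in the pool.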
+ return + buf.Reset() + b.p.Put(buf) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go new file mode 100644 index 00000000..858529e9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go @@ -0,0 +1,422 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "encoding" + "encoding/json" + "errors" + "fmt" + "reflect" + "sync" + + "github.com/fxamacker/cbor/v2" +) + +// Returns a non-nil error if and only if the argument's type (or one of its component types, for +// composite types) implements json.Marshaler or encoding.TextMarshaler without also implementing +// cbor.Marshaler and likewise for the respective Unmarshaler interfaces. +// +// This is a temporary, graduation-blocking restriction and will be removed in favor of automatic +// transcoding between CBOR and JSON/text for these types. This restriction allows CBOR to be +// exercised for in-tree and unstructured types while mitigating the risk of mangling out-of-tree +// types in client programs. +func RejectCustomMarshalers(v interface{}) error { + if v == nil { + return nil + } + rv := reflect.ValueOf(v) + if err := marshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil { + return fmt.Errorf("unable to serialize %T: %w", v, err) + } + if err := unmarshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil { + return fmt.Errorf("unable to serialize %T: %w", v, err) + } + return nil +} + +// Recursion depth is limited as a basic mitigation against cyclic objects. Objects created by the +// decoder shouldn't be able to contain cycles, but practically any object can be passed to the +// encoder. +var errMaxDepthExceeded = errors.New("object depth exceeds limit (possible cycle?)") + +// The JSON encoder begins detecting cycles after depth 1000. Use a generous limit here, knowing +// that it might be exceeded by deeply nested acyclic objects. The limit will be removed along with +// the rest of this mechanism. +const maxDepth = 2048 + +var marshalerCache = checkers{ + cborInterface: reflect.TypeFor[cbor.Marshaler](), + nonCBORInterfaces: []reflect.Type{ + reflect.TypeFor[json.Marshaler](), + reflect.TypeFor[encoding.TextMarshaler](), + }, +} + +var unmarshalerCache = checkers{ + cborInterface: reflect.TypeFor[cbor.Unmarshaler](), + nonCBORInterfaces: []reflect.Type{ + reflect.TypeFor[json.Unmarshaler](), + reflect.TypeFor[encoding.TextUnmarshaler](), + }, + assumeAddressableValues: true, +} + +// checker wraps a function for dynamically checking a value of a specific type for custom JSON +// behaviors not matched by a custom CBOR behavior. +type checker struct { + // check returns a non-nil error if the given value might be marshalled to or from CBOR + // using the default behavior for its kind, but marshalled to or from JSON using custom + // behavior.
+ check func(rv reflect.Value, depth int) error + + // safe returns true if all values of this type are safe from mismatched custom marshalers. + safe func() bool +} + +// TODO: stale +// Having a single addressable checker for comparisons lets us prune and collapse parts of the +// object traversal that are statically known to be safe. Depending on the type, it may be +// unnecessary to inspect each value of that type. For example, no value of the built-in type bool +// can implement json.Marshaler (a named type whose underlying type is bool could, but it is a +// distinct type from bool). +var noop = checker{ + safe: func() bool { + return true + }, + check: func(rv reflect.Value, depth int) error { + return nil + }, +} + +type checkers struct { + m sync.Map // reflect.Type => *checker + + cborInterface reflect.Type + nonCBORInterfaces []reflect.Type + + assumeAddressableValues bool +} + +func (cache *checkers) getChecker(rt reflect.Type) checker { + if ptr, ok := cache.m.Load(rt); ok { + return *ptr.(*checker) + } + + return cache.getCheckerInternal(rt, nil) +} + +// linked list node representing the path from a composite type to an element type +type path struct { + Type reflect.Type + Parent *path +} + +func (p path) cyclic(rt reflect.Type) bool { + for ancestor := &p; ancestor != nil; ancestor = ancestor.Parent { + if ancestor.Type == rt { + return true + } + } + return false +} + +func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c checker) { + // Store a placeholder cache entry first to handle cyclic types. + var wg sync.WaitGroup + wg.Add(1) + defer wg.Done() + c = checker{ + safe: func() bool { + wg.Wait() + return c.safe() + }, + check: func(rv reflect.Value, depth int) error { + wg.Wait() + return c.check(rv, depth) + }, + } + if actual, loaded := cache.m.LoadOrStore(rt, &c); loaded { + // Someone else stored an entry for this type, use it. + return *actual.(*checker) + } + + // Take a nonreflective path for the unstructured container types. They're common and + // usually nested inside one another. + switch rt { + case reflect.TypeFor[map[string]interface{}](), reflect.TypeFor[[]interface{}](): + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + return checkUnstructuredValue(cache, rv.Interface(), depth) + }, + } + } + + // It's possible that one of the relevant interfaces is implemented on a type with a pointer + // receiver, but that a particular value of that type is not addressable. For example: + // + // func (Foo) MarshalText() ([]byte, error) { ... } + // func (*Foo) MarshalCBOR() ([]byte, error) { ... } + // + // Both methods are in the method set of *Foo, but the method set of Foo contains only + // MarshalText. + // + // Both the unmarshaler and marshaler checks assume that methods implementing a JSON or text + // interface with a pointer receiver are always accessible. Only the unmarshaler check + // assumes that CBOR methods with pointer receivers are accessible. 
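+ // Illustrative sketch (not upstream code) of the method-set point above:
+ //
+ //	var f Foo
+ //	_, isText := interface{}(f).(encoding.TextMarshaler) // true: value receiver
+ //	_, isCBOR := interface{}(f).(cbor.Marshaler)         // false: MarshalCBOR is declared on *Foo only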
+ + if rt.Implements(cache.cborInterface) { + return noop + } + for _, unsafe := range cache.nonCBORInterfaces { + if rt.Implements(unsafe) { + err := fmt.Errorf("%v implements %v without corresponding cbor interface", rt, unsafe) + return checker{ + safe: func() bool { + return false + }, + check: func(reflect.Value, int) error { + return err + }, + } + } + } + + if cache.assumeAddressableValues && reflect.PointerTo(rt).Implements(cache.cborInterface) { + return noop + } + for _, unsafe := range cache.nonCBORInterfaces { + if reflect.PointerTo(rt).Implements(unsafe) { + err := fmt.Errorf("%v implements %v without corresponding cbor interface", reflect.PointerTo(rt), unsafe) + return checker{ + safe: func() bool { + return false + }, + check: func(reflect.Value, int) error { + return err + }, + } + } + } + + self := &path{Type: rt, Parent: parent} + + switch rt.Kind() { + case reflect.Array: + ce := cache.getCheckerInternal(rt.Elem(), self) + rtlen := rt.Len() + if rtlen == 0 || (!self.cyclic(rt.Elem()) && ce.safe()) { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for i := 0; i < rtlen; i++ { + if err := ce.check(rv.Index(i), depth-1); err != nil { + return err + } + } + return nil + }, + } + + case reflect.Interface: + // All interface values have to be checked because their dynamic type might + // implement one of the interesting interfaces or be composed of another type that + // does. + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if rv.IsNil() { + return nil + } + // Unpacking interfaces must count against recursion depth, + // consider this cycle: + // > var i interface{} + // > var p *interface{} = &i + // > i = p + // > rv := reflect.ValueOf(i) + // > for { + // > rv = rv.Elem() + // > } + if depth <= 0 { + return errMaxDepthExceeded + } + rv = rv.Elem() + return cache.getChecker(rv.Type()).check(rv, depth-1) + }, + } + + case reflect.Map: + rtk := rt.Key() + ck := cache.getCheckerInternal(rtk, self) + rte := rt.Elem() + ce := cache.getCheckerInternal(rte, self) + if !self.cyclic(rtk) && !self.cyclic(rte) && ck.safe() && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + iter := rv.MapRange() + rvk := reflect.New(rtk).Elem() + rve := reflect.New(rte).Elem() + for iter.Next() { + rvk.SetIterKey(iter) + if err := ck.check(rvk, depth-1); err != nil { + return err + } + rve.SetIterValue(iter) + if err := ce.check(rve, depth-1); err != nil { + return err + } + } + return nil + }, + } + + case reflect.Pointer: + ce := cache.getCheckerInternal(rt.Elem(), self) + if !self.cyclic(rt.Elem()) && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if rv.IsNil() { + return nil + } + if depth <= 0 { + return errMaxDepthExceeded + } + return ce.check(rv.Elem(), depth-1) + }, + } + + case reflect.Slice: + ce := cache.getCheckerInternal(rt.Elem(), self) + if !self.cyclic(rt.Elem()) && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for i := 0; i < rv.Len(); i++ { + if err := ce.check(rv.Index(i), depth-1); err != nil { + return 
err + } + } + return nil + }, + } + + case reflect.Struct: + type field struct { + Index int + Checker checker + } + var fields []field + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + cf := cache.getCheckerInternal(f.Type, self) + if !self.cyclic(f.Type) && cf.safe() { + continue + } + fields = append(fields, field{Index: i, Checker: cf}) + } + if len(fields) == 0 { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for _, fi := range fields { + if err := fi.Checker.check(rv.Field(fi.Index), depth-1); err != nil { + return err + } + } + return nil + }, + } + + default: + // Not a serializable composite type (funcs and channels are composite types but are + // rejected by JSON and CBOR serialization). + return noop + + } +} + +func checkUnstructuredValue(cache *checkers, v interface{}, depth int) error { + switch v := v.(type) { + case nil, bool, int64, float64, string: + return nil + case []interface{}: + if depth <= 0 { + return errMaxDepthExceeded + } + for _, element := range v { + if err := checkUnstructuredValue(cache, element, depth-1); err != nil { + return err + } + } + return nil + case map[string]interface{}: + if depth <= 0 { + return errMaxDepthExceeded + } + for _, element := range v { + if err := checkUnstructuredValue(cache, element, depth-1); err != nil { + return err + } + } + return nil + default: + // Unmarshaling an unstructured doesn't use other dynamic types, but nothing + // prevents inserting values with arbitrary dynamic types into unstructured content, + // as long as they can be marshalled. + rv := reflect.ValueOf(v) + return cache.getChecker(rv.Type()).check(rv, depth) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go new file mode 100644 index 00000000..895b0def --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go @@ -0,0 +1,158 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "reflect" + + "github.com/fxamacker/cbor/v2" +) + +var simpleValues *cbor.SimpleValueRegistry = func() *cbor.SimpleValueRegistry { + var opts []func(*cbor.SimpleValueRegistry) error + for sv := 0; sv <= 255; sv++ { + // Reject simple values 0-19, 23, and 32-255. The simple values 24-31 are reserved + // and considered ill-formed by the CBOR specification. We only accept false (20), + // true (21), and null (22). + switch sv { + case 20: // false + case 21: // true + case 22: // null + case 24, 25, 26, 27, 28, 29, 30, 31: // reserved + default: + opts = append(opts, cbor.WithRejectedSimpleValue(cbor.SimpleValue(sv))) + } + } + simpleValues, err := cbor.NewSimpleValueRegistryFromDefaults(opts...) 
+ if err != nil { + panic(err) + } + return simpleValues +}() + +var Decode cbor.DecMode = func() cbor.DecMode { + decode, err := cbor.DecOptions{ + // Maps with duplicate keys are well-formed but invalid according to the CBOR spec + // and never acceptable. Unlike the JSON serializer, inputs containing duplicate map + // keys are rejected outright and not surfaced as a strict decoding error. + DupMapKey: cbor.DupMapKeyEnforcedAPF, + + // For JSON parity, decoding an RFC3339 string into time.Time needs to be accepted + // with or without tagging. If a tag number is present, it must be valid. + TimeTag: cbor.DecTagOptional, + + // Observed depth up to 16 in fuzzed batch/v1 CronJobList. JSON implementation limit + // is 10000. + MaxNestedLevels: 64, + + MaxArrayElements: 1024, + MaxMapPairs: 1024, + + // Indefinite-length sequences aren't produced by this serializer, but other + // implementations can. + IndefLength: cbor.IndefLengthAllowed, + + // Accept inputs that contain CBOR tags. + TagsMd: cbor.TagsAllowed, + + // Decode type 0 (unsigned integer) as int64. + // TODO: IntDecConvertSignedOrFail errors on overflow, JSON will try to fall back to float64. + IntDec: cbor.IntDecConvertSignedOrFail, + + // Disable producing map[cbor.ByteString]interface{}, which is not acceptable for + // decodes into interface{}. + MapKeyByteString: cbor.MapKeyByteStringForbidden, + + // Error on map keys that don't map to a field in the destination struct. + ExtraReturnErrors: cbor.ExtraDecErrorUnknownField, + + // Decode maps into concrete type map[string]interface{} when the destination is an + // interface{}. + DefaultMapType: reflect.TypeOf(map[string]interface{}(nil)), + + // A CBOR text string whose content is not a valid UTF-8 sequence is well-formed but + // invalid according to the CBOR spec. Reject invalid inputs. Encoders are + // responsible for ensuring that all text strings they produce contain valid UTF-8 + // sequences and may use the byte string major type to encode strings that have not + // been validated. + UTF8: cbor.UTF8RejectInvalid, + + // Never make a case-insensitive match between a map key and a struct field. + FieldNameMatching: cbor.FieldNameMatchingCaseSensitive, + + // Produce string concrete values when decoding a CBOR byte string into interface{}. + DefaultByteStringType: reflect.TypeOf(""), + + // Allow CBOR byte strings to be decoded into string destination values. If a byte + // string is enclosed in an "expected later encoding" tag + // (https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), then the text + // encoding indicated by that tag (e.g. base64) will be applied to the contents of + // the byte string. + ByteStringToString: cbor.ByteStringToStringAllowedWithExpectedLaterEncoding, + + // Allow CBOR byte strings to match struct fields when appearing as a map key. + FieldNameByteString: cbor.FieldNameByteStringAllowed, + + // When decoding an unrecognized tag to interface{}, return the decoded tag content + // instead of the default, a cbor.Tag representing a (number, content) pair. + UnrecognizedTagToAny: cbor.UnrecognizedTagContentToAny, + + // Decode time tags to interface{} as strings containing RFC 3339 timestamps. + TimeTagToAny: cbor.TimeTagToRFC3339Nano, + + // For parity with JSON, strings can be decoded into time.Time if they are RFC 3339 + // timestamps. + ByteStringToTime: cbor.ByteStringToTimeAllowed, + + // Reject NaN and infinite floating-point values since they don't have a JSON + // representation (RFC 8259 Section 6). 
+ NaN: cbor.NaNDecodeForbidden, + Inf: cbor.InfDecodeForbidden, + + // When unmarshaling a byte string into a []byte, assume that the byte string + // contains base64-encoded bytes, unless explicitly counterindicated by an "expected + // later encoding" tag. This is consistent with the behavior of unmarshaling a JSON + // text into a []byte. + ByteStringExpectedFormat: cbor.ByteStringExpectedBase64, + + // Reject the arbitrary-precision integer tags because they can't be faithfully + // roundtripped through the allowable Unstructured types. + BignumTag: cbor.BignumTagForbidden, + + // Reject anything other than the simple values true, false, and null. + SimpleValues: simpleValues, + + // Disable default recognition of types implementing encoding.BinaryUnmarshaler, + // which is not recognized for JSON decoding. + BinaryUnmarshaler: cbor.BinaryUnmarshalerNone, + }.DecMode() + if err != nil { + panic(err) + } + return decode +}() + +// DecodeLax is derived from Decode, but does not complain about unknown fields in the input. +var DecodeLax cbor.DecMode = func() cbor.DecMode { + opts := Decode.DecOptions() + opts.ExtraReturnErrors &^= cbor.ExtraDecErrorUnknownField // clear bit + dm, err := opts.DecMode() + if err != nil { + panic(err) + } + return dm +}() diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go new file mode 100644 index 00000000..61f3f145 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "github.com/fxamacker/cbor/v2" +) + +var Diagnostic cbor.DiagMode = func() cbor.DiagMode { + opts := Decode.DecOptions() + diagnostic, err := cbor.DiagOptions{ + ByteStringText: true, + + MaxNestedLevels: opts.MaxNestedLevels, + MaxArrayElements: opts.MaxArrayElements, + MaxMapPairs: opts.MaxMapPairs, + }.DiagMode() + if err != nil { + panic(err) + } + return diagnostic +}() diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go new file mode 100644 index 00000000..c6693138 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go @@ -0,0 +1,155 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package modes + +import ( + "io" + + "github.com/fxamacker/cbor/v2" +) + +var Encode = EncMode{ + delegate: func() cbor.UserBufferEncMode { + encode, err := cbor.EncOptions{ + // Map keys need to be sorted to have deterministic output, and this is the order + // defined in RFC 8949 4.2.1 "Core Deterministic Encoding Requirements". + Sort: cbor.SortBytewiseLexical, + + // CBOR supports distinct types for IEEE-754 float16, float32, and float64. Store + // floats in the smallest width that preserves value so that equivalent float32 and + // float64 values encode to identical bytes, as they do in a JSON + // encoding. Satisfies one of the "Core Deterministic Encoding Requirements". + ShortestFloat: cbor.ShortestFloat16, + + // Error on attempt to encode NaN and infinite values. This is what the JSON + // serializer does. + NaNConvert: cbor.NaNConvertReject, + InfConvert: cbor.InfConvertReject, + + // Error on attempt to encode math/big.Int values, which can't be faithfully + // roundtripped through Unstructured in general (the dynamic numeric types allowed + // in Unstructured are limited to float64 and int64). + BigIntConvert: cbor.BigIntConvertReject, + + // MarshalJSON for time.Time writes RFC3339 with nanos. + Time: cbor.TimeRFC3339Nano, + + // The decoder must be able to accept RFC3339 strings with or without tag 0 (e.g. by + // the end of time.Time -> JSON -> Unstructured -> CBOR, the CBOR encoder has no + // reliable way of knowing that a particular string originated from serializing a + // time.Time), so producing tag 0 has little use. + TimeTag: cbor.EncTagNone, + + // Indefinite-length items have multiple encodings and aren't being used anyway, so + // disable to avoid an opportunity for nondeterminism. + IndefLength: cbor.IndefLengthForbidden, + + // Preserve distinction between nil and empty for slices and maps. + NilContainers: cbor.NilContainerAsNull, + + // OK to produce tags. + TagsMd: cbor.TagsAllowed, + + // Use the same definition of "empty" as encoding/json. + OmitEmpty: cbor.OmitEmptyGoValue, + + // The CBOR types text string and byte string are structurally equivalent, with the + // semantic difference that a text string whose content is an invalid UTF-8 sequence + // is itself invalid. We reject all invalid text strings at decode time and do not + // validate or sanitize all Go strings at encode time. Encoding Go strings to the + // byte string type is comparable to the existing Protobuf behavior and cheaply + // ensures that the output is valid CBOR. + String: cbor.StringToByteString, + + // Encode struct field names to the byte string type rather than the text string + // type. + FieldName: cbor.FieldNameToByteString, + + // Marshal Go byte arrays to CBOR arrays of integers (as in JSON) instead of byte + // strings. + ByteArray: cbor.ByteArrayToArray, + + // Marshal []byte to CBOR byte string enclosed in tag 22 (expected later base64 + // encoding, https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), to + // interoperate with the existing JSON behavior. This indicates to the decoder that, + // when decoding into a string (or unstructured), the resulting value should be the + // base64 encoding of the original bytes. No base64 encoding or decoding needs to be + // performed for []byte-to-CBOR-to-[]byte roundtrips. + ByteSliceLaterFormat: cbor.ByteSliceLaterFormatBase64, + + // Disable default recognition of types implementing encoding.BinaryMarshaler, which + // is not recognized for JSON encoding. 
+ BinaryMarshaler: cbor.BinaryMarshalerNone, + }.UserBufferEncMode() + if err != nil { + panic(err) + } + return encode + }(), +} + +var EncodeNondeterministic = EncMode{ + delegate: func() cbor.UserBufferEncMode { + opts := Encode.options() + opts.Sort = cbor.SortNone // TODO: Use cbor.SortFastShuffle after bump to v2.7.0. + em, err := opts.UserBufferEncMode() + if err != nil { + panic(err) + } + return em + }(), +} + +type EncMode struct { + delegate cbor.UserBufferEncMode +} + +func (em EncMode) options() cbor.EncOptions { + return em.delegate.EncOptions() +} + +func (em EncMode) MarshalTo(v interface{}, w io.Writer) error { + if buf, ok := w.(*buffer); ok { + return em.delegate.MarshalToBuffer(v, &buf.Buffer) + } + + buf := buffers.Get() + defer buffers.Put(buf) + if err := em.delegate.MarshalToBuffer(v, &buf.Buffer); err != nil { + return err + } + + if _, err := io.Copy(w, buf); err != nil { + return err + } + + return nil +} + +func (em EncMode) Marshal(v interface{}) ([]byte, error) { + buf := buffers.Get() + defer buffers.Put(buf) + + if err := em.MarshalTo(v, &buf.Buffer); err != nil { + return nil, err + } + + clone := make([]byte, buf.Len()) + copy(clone, buf.Bytes()) + + return clone, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index ce77c791..1680c149 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -46,6 +46,7 @@ const ( ContentTypeJSON string = "application/json" ContentTypeYAML string = "application/yaml" ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" + ContentTypeCBOR string = "application/cbor" ) // RawExtension is used to hold extensions in external versions. diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index 9b3c9c8d..1ab8fd39 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -147,7 +147,6 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see // data written to data, or be larger than data and a different array. - n := len(data) m := json.RawMessage(data[:0]) if err := r.decoder.Decode(&m); err != nil { return 0, err @@ -156,12 +155,19 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // If capacity of data is less than length of the message, decoder will allocate a new slice // and set m to it, which means we need to copy the partial result back into data and preserve // the remaining result for subsequent reads. - if len(m) > n { - //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. - data = append(data[0:0], m[:n]...) - r.remaining = m[n:] - return n, io.ErrShortBuffer + if len(m) > cap(data) { + copy(data, m) + r.remaining = m[len(data):] + return len(data), io.ErrShortBuffer } + + if len(m) > len(data) { + // The bytes beyond len(data) were stored in data's underlying array, which we do + // not own after this function returns. + r.remaining = append([]byte(nil), m[len(data):]...) 
+ return len(data), io.ErrShortBuffer + } + + return len(m), nil } diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go index a32fce5a..8054b986 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go @@ -116,6 +116,15 @@ func IsUpgradeFailure(err error) bool { return errors.As(err, &upgradeErr) } +// IsHTTPSProxyError returns true if error is Gorilla/Websockets HTTPS Proxy dial error; +// false otherwise (see https://github.com/kubernetes/kubernetes/issues/126134). +func IsHTTPSProxyError(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), "proxy: unknown scheme: https") +} + // IsUpgradeRequest returns true if the given request is a connection upgrade request func IsUpgradeRequest(req *http.Request) bool { for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] { diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index f358c794..5fd2e16c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,6 +25,7 @@ import ( "strconv" "strings" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/klog/v2" ) @@ -92,6 +93,20 @@ func (intstr *IntOrString) UnmarshalJSON(value []byte) error { return json.Unmarshal(value, &intstr.IntVal) } +func (intstr *IntOrString) UnmarshalCBOR(value []byte) error { + if err := cbor.Unmarshal(value, &intstr.StrVal); err == nil { + intstr.Type = String + return nil + } + + if err := cbor.Unmarshal(value, &intstr.IntVal); err != nil { + return err + } + + intstr.Type = Int + return nil +} + // String returns the string value, or the Itoa of the int value. func (intstr *IntOrString) String() string { if intstr == nil { @@ -126,6 +141,17 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { } } +func (intstr IntOrString) MarshalCBOR() ([]byte, error) { + switch intstr.Type { + case Int: + return cbor.Marshal(intstr.IntVal) + case String: + return cbor.Marshal(intstr.StrVal) + default: + return nil, fmt.Errorf("impossible IntOrString.Type") + } +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. // diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 3674914f..4fe0c5eb 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -17,6 +17,7 @@ limitations under the License. package runtime import ( + "context" "fmt" "net/http" "runtime" @@ -35,7 +36,7 @@ var ( ) // PanicHandlers is a list of functions which will be invoked when a panic happens. -var PanicHandlers = []func(interface{}){logPanic} +var PanicHandlers = []func(context.Context, interface{}){logPanic} // HandleCrash simply catches a crash and logs an error. Meant to be called via // defer. Additional context-specific handlers can be provided, and will be @@ -43,23 +44,54 @@ var PanicHandlers = []func(interface{}){logPanic} // handlers and logging the panic message. // // E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
+// +// TODO(pohly): logcheck:context // HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { - for _, fn := range PanicHandlers { - fn(r) - } - for _, fn := range additionalHandlers { - fn(r) - } - if ReallyCrash { - // Actually proceed to panic. - panic(r) + additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers)) + for i, handler := range additionalHandlers { + handler := handler // capture loop variable + additionalHandlersWithContext[i] = func(_ context.Context, r interface{}) { + handler(r) + } } + + handleCrash(context.Background(), r, additionalHandlersWithContext...) + } +} + +// HandleCrashWithContext simply catches a crash and logs an error. Meant to be called via +// defer. Additional context-specific handlers can be provided, and will be +// called in case of panic. HandleCrashWithContext actually crashes, after calling the +// handlers and logging the panic message. +// +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +// +// The context is used to determine how to log. +func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(context.Context, interface{})) { + if r := recover(); r != nil { + handleCrash(ctx, r, additionalHandlers...) + } +} + +// handleCrash is the common implementation of HandleCrash and HandleCrashWithContext. +// Having those call a common implementation ensures that the stack depth +// is the same regardless of which path the handlers get invoked through. +func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) { + for _, fn := range PanicHandlers { + fn(ctx, r) + } + for _, fn := range additionalHandlers { + fn(ctx, r) + } + if ReallyCrash { + // Actually proceed to panic. + panic(r) } } // logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). -func logPanic(r interface{}) { +func logPanic(ctx context.Context, r interface{}) { if r == http.ErrAbortHandler { // honor the http.ErrAbortHandler sentinel panic value: // ErrAbortHandler is a sentinel panic value to abort a handler. @@ -73,10 +105,20 @@ func logPanic(r interface{}) { const size = 64 << 10 stacktrace := make([]byte, size) stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + + // We don't really know how many call frames to skip because the Go + // panic handler is between us and the code where the panic occurred. + // If it's one function (as in Go 1.21), then skipping four levels + // gets us to the function which called the `defer HandleCrashWithContext(...)`. + logger := klog.FromContext(ctx).WithCallDepth(4) + + // For backwards compatibility, conversion to string + // is handled here instead of deferring to the logging + // backend. if _, ok := r.(string); ok { - klog.Errorf("Observed a panic: %s\n%s", r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", r, "stacktrace", string(stacktrace)) } else { - klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", fmt.Sprintf("%v", r), "panicGoValue", fmt.Sprintf("%#v", r), "stacktrace", string(stacktrace)) } } @@ -84,35 +126,76 @@ func logPanic(r interface{}) { // error occurs. // TODO(lavalamp): for testability, this and the below HandleError function // should be packaged up into a testable and reusable object.
-var ErrorHandlers = []func(error){ +var ErrorHandlers = []ErrorHandler{ logError, - (&rudimentaryErrorBackoff{ - lastErrorTime: time.Now(), - // 1ms was the number folks were able to stomach as a global rate limit. - // If you need to log errors more than 1000 times a second you - // should probably consider fixing your code instead. :) - minPeriod: time.Millisecond, - }).OnError, + func(_ context.Context, _ error, _ string, _ ...interface{}) { + (&rudimentaryErrorBackoff{ + lastErrorTime: time.Now(), + // 1ms was the number folks were able to stomach as a global rate limit. + // If you need to log errors more than 1000 times a second you + // should probably consider fixing your code instead. :) + minPeriod: time.Millisecond, + }).OnError() + }, } +type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues ...interface{}) + // HandleError is a method to invoke when a non-user facing piece of code cannot // return an error and needs to indicate it has been ignored. Invoking this method // is preferable to logging the error - the default behavior is to log but the // errors may be sent to a remote server for analysis. +// +// TODO(pohly): logcheck:context // HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging. func HandleError(err error) { // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead if err == nil { return } + handleError(context.Background(), err, "Unhandled Error") +} + +// HandleErrorWithContext is a method to invoke when a non-user facing piece of code cannot +// return an error and needs to indicate it has been ignored. Invoking this method +// is preferable to logging the error - the default behavior is to log but the +// errors may be sent to a remote server for analysis. The context is used to +// determine how to log the error. +// +// If contextual logging is enabled, the default log output is equivalent to +// +// logr.FromContext(ctx).WithName("UnhandledError").Error(err, msg, keysAndValues...) +// +// Without contextual logging, it is equivalent to: +// +// klog.ErrorS(err, msg, keysAndValues...) +// +// In contrast to HandleError, passing nil for the error is still going to +// trigger a log entry. Don't construct a new error or wrap an error +// with fmt.Errorf. Instead, add additional information via the message +// and key/value pairs. +// +// This variant should be used instead of HandleError because it supports +// structured, contextual logging. +func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + handleError(ctx, err, msg, keysAndValues...) +} + +// handleError is the common implementation of HandleError and HandleErrorWithContext. +// Using this common implementation ensures that the stack depth +// is the same regardless of which path the handlers get invoked through. +func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { for _, fn := range ErrorHandlers { - fn(err) + fn(ctx, err, msg, keysAndValues...) } } -// logError prints an error with the call stack of the location it was reported -func logError(err error) { - klog.ErrorDepth(2, err) +// logError prints an error with the call stack of the location it was reported. +// It expects to be called as -> HandleError[WithContext] -> handleError -> logError.
+func logError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + logger := klog.FromContext(ctx).WithCallDepth(3) + logger = klog.LoggerWithName(logger, "UnhandledError") + logger.Error(err, msg, keysAndValues...) //nolint:logcheck // logcheck complains about unknown key/value pairs. } type rudimentaryErrorBackoff struct { @@ -125,7 +208,7 @@ type rudimentaryErrorBackoff struct { // OnError will block if it is called more often than the embedded period time. // This will prevent overly tight hot error loops. -func (r *rudimentaryErrorBackoff) OnError(error) { +func (r *rudimentaryErrorBackoff) OnError() { now := time.Now() // start the timer before acquiring the lock r.lastErrorTimeLock.Lock() d := now.Sub(r.lastErrorTime) diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go index b76129a1..cd961c8c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go @@ -68,14 +68,8 @@ func (s Set[T]) Delete(items ...T) Set[T] { // Clear empties the set. // It is preferable to replace the set with a newly constructed set, // but not all callers can do that (when there are other references to the map). -// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN -// can't be cleared because NaN can't be removed. -// For sets containing items of a type that is reflexive for ==, -// this is optimized to a single call to runtime.mapclear(). func (s Set[T]) Clear() Set[T] { - for key := range s { - delete(s, key) - } + clear(s) return s } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 920c113b..6825a808 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1361,6 +1361,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me // original. Otherwise, check if we want to preserve it or skip it. // Preserving the null value is useful when we want to send an explicit // delete to the API server. + // In some cases, this may lead to inconsistent behavior with create. + // ref: https://github.com/kubernetes/kubernetes/issues/123304 + // To avoid breaking compatibility, + // we made corresponding changes on the client side to ensure that the create and patch behaviors are idempotent. if patchV == nil { delete(original, k) if mergeOptions.IgnoreUnmatchedNulls { diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index b6c7bbfa..ce37fd8c 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -27,13 +27,25 @@ import ( // Interface can be implemented by anything that knows how to watch and report changes. type Interface interface { - // Stop stops watching. Will close the channel returned by ResultChan(). Releases - // any resources used by the watch. + // Stop tells the producer that the consumer is done watching, so the + // producer should stop sending events and close the result channel. The + // consumer should keep watching for events until the result channel is + // closed. + // + // Because some implementations may create channels when constructed, Stop + // must always be called, even if the consumer has not yet called + // ResultChan(). + // + // Only the consumer should call Stop(), not the producer. 
If the producer + errors and needs to stop the watch prematurely, it should instead send + an error event and close the result channel. Stop() - // ResultChan returns a chan which will receive all the events. If an error occurs - // or Stop() is called, the implementation will close this channel and - // release any resources used by the watch. + // ResultChan returns a channel which will receive events from the event + // producer. If an error occurs or Stop() is called, the producer must + // close this channel and release any resources used by the watch. + // Closing the result channel tells the consumer that no more events will be + // sent. ResultChan() <-chan Event } @@ -322,3 +334,21 @@ func (pw *ProxyWatcher) ResultChan() <-chan Event { func (pw *ProxyWatcher) StopChan() <-chan struct{} { return pw.stopCh } + +// MockWatcher implements watch.Interface with mockable functions. +type MockWatcher struct { + StopFunc func() + ResultChanFunc func() <-chan Event +} + +var _ Interface = &MockWatcher{} + +// Stop calls StopFunc +func (mw MockWatcher) Stop() { + mw.StopFunc() +} + +// ResultChan calls ResultChanFunc +func (mw MockWatcher) ResultChan() <-chan Event { + return mw.ResultChanFunc() +} diff --git a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go new file mode 100644 index 00000000..7cb7795b --- /dev/null +++ b/vendor/k8s.io/utils/net/multi_listen.go @@ -0,0 +1,195 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "context" + "fmt" + "net" + "sync" +) + +// connErrPair pairs conn and error which is returned by accept on sub-listeners. +type connErrPair struct { + conn net.Conn + err error +} + +// multiListener implements net.Listener +type multiListener struct { + listeners []net.Listener + wg sync.WaitGroup + + // connCh passes accepted connections from child listeners to the parent. + connCh chan connErrPair + // stopCh communicates from parent to child listeners. + stopCh chan struct{} +} + +// compile time check to ensure *multiListener implements net.Listener
var _ net.Listener = &multiListener{} + +// MultiListen returns net.Listener which can listen on and accept connections for +// the given network on multiple addresses. Internally it uses stdlib to create +// sub-listeners and multiplexes connection requests using go-routines. +// The network must be "tcp", "tcp4" or "tcp6". +// It follows the semantics of net.Listen that primarily means: +// 1. If the host is an unspecified/zero IP address with "tcp" network, MultiListen +// listens on all available unicast and anycast IP addresses of the local system. +// 2. Use "tcp4" or "tcp6" to exclusively listen on IPv4 or IPv6 family, respectively. +// 3. The host can accept names (e.g., localhost) and it will create a listener for at +// most one of the host's IP addresses.
+func MultiListen(ctx context.Context, network string, addrs ...string) (net.Listener, error) { + var lc net.ListenConfig + return multiListen( + ctx, + network, + addrs, + func(ctx context.Context, network, address string) (net.Listener, error) { + return lc.Listen(ctx, network, address) + }) +} + +// multiListen implements MultiListen by consuming stdlib functions as dependency allowing +// mocking for unit-testing. +func multiListen( + ctx context.Context, + network string, + addrs []string, + listenFunc func(ctx context.Context, network, address string) (net.Listener, error), +) (net.Listener, error) { + if !(network == "tcp" || network == "tcp4" || network == "tcp6") { + return nil, fmt.Errorf("network %q not supported", network) + } + if len(addrs) == 0 { + return nil, fmt.Errorf("no address provided to listen on") + } + + ml := &multiListener{ + connCh: make(chan connErrPair), + stopCh: make(chan struct{}), + } + for _, addr := range addrs { + l, err := listenFunc(ctx, network, addr) + if err != nil { + // close all the sub-listeners and exit + _ = ml.Close() + return nil, err + } + ml.listeners = append(ml.listeners, l) + } + + for _, l := range ml.listeners { + ml.wg.Add(1) + go func(l net.Listener) { + defer ml.wg.Done() + for { + // Accept() is blocking, unless ml.Close() is called, in which + // case it will return immediately with an error. + conn, err := l.Accept() + // This assumes that ANY error from Accept() will terminate the + // sub-listener. We could maybe be more precise, but it + // doesn't seem necessary. + terminate := err != nil + + select { + case ml.connCh <- connErrPair{conn: conn, err: err}: + case <-ml.stopCh: + // In case we accepted a connection AND were stopped, and + // this select-case was chosen, just throw away the + // connection. This avoids potentially blocking on connCh + // or leaking a connection. + if conn != nil { + _ = conn.Close() + } + terminate = true + } + // Make sure we don't loop on Accept() returning an error and + // the select choosing the channel case. + if terminate { + return + } + } + }(l) + } + return ml, nil +} + +// Accept implements net.Listener. It waits for and returns a connection from +// any of the sub-listeners. +func (ml *multiListener) Accept() (net.Conn, error) { + // wait for any sub-listener to enqueue an accepted connection + connErr, ok := <-ml.connCh + if !ok { + // The channel will be closed only when Close() is called on the + // multiListener. Closing of this channel implies that all + // sub-listeners are also closed, which causes a "use of closed + // network connection" error on their Accept() calls. We return the + // same error for multiListener.Accept() if multiListener.Close() + // has already been called. + return nil, fmt.Errorf("use of closed network connection") + } + return connErr.conn, connErr.err +} + +// Close implements net.Listener. It will close all sub-listeners and wait for +// the go-routines to exit. +func (ml *multiListener) Close() error { + // Make sure this can be called repeatedly without explosions. + select { + case <-ml.stopCh: + return fmt.Errorf("use of closed network connection") + default: + } + + // Tell all sub-listeners to stop. + close(ml.stopCh) + + // Closing the listeners causes Accept() to immediately return an error in + // the sub-listener go-routines. + for _, l := range ml.listeners { + _ = l.Close() + } + + // Wait for all the sub-listener go-routines to exit. + ml.wg.Wait() + close(ml.connCh) + + // Drain any already-queued connections.
+ for connErr := range ml.connCh { + if connErr.conn != nil { + _ = connErr.conn.Close() + } + } + return nil +} + +// Addr is an implementation of the net.Listener interface. It always returns +// the address of the first listener. Callers should use conn.LocalAddr() to +// obtain the actual local address of the sub-listener. +func (ml *multiListener) Addr() net.Addr { + return ml.listeners[0].Addr() +} + +// Addrs is like Addr, but returns the address for all registered listeners. +func (ml *multiListener) Addrs() []net.Addr { + var ret []net.Addr + for _, l := range ml.listeners { + ret = append(ret, l.Addr()) + } + return ret +} diff --git a/vendor/kubevirt.io/api/core/v1/types.go b/vendor/kubevirt.io/api/core/v1/types.go index 64aa6e7a..66c93747 100644 --- a/vendor/kubevirt.io/api/core/v1/types.go +++ b/vendor/kubevirt.io/api/core/v1/types.go @@ -1088,6 +1088,10 @@ const ( // VolumesUpdateMigration indicates that the migration copies and updates // the volumes VolumesUpdateMigration string = "kubevirt.io/volume-update-migration" + + // ImmediateDataVolumeCreation indicates that the data volumes should be created immediately, + // even if the VM is halted + ImmediateDataVolumeCreation string = "kubevirt.io/immediate-data-volume-creation" ) func NewVMI(name string, uid types.UID) *VirtualMachineInstance { diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go index e2c54111..194e531b 100644 --- a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go +++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go @@ -17,7 +17,6 @@ limitations under the License. package v1beta1 import ( - ocpconfigv1 "github.com/openshift/api/config/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" sdkapi "kubevirt.io/controller-lifecycle-operator-sdk/api" @@ -80,7 +79,7 @@ type StorageSpec struct { // Resources represents the minimum resources the volume should have. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional - Resources corev1.ResourceRequirements `json:"resources,omitempty"` + Resources corev1.VolumeResourceRequirements `json:"resources,omitempty"` // VolumeName is the binding reference to the PersistentVolume backing this claim. // +optional VolumeName string `json:"volumeName,omitempty"` @@ -820,6 +819,10 @@ type CDICertConfig struct { // Server configuration // Certs are rotated and discarded Server *CertConfig `json:"server,omitempty"` + + // Client configuration + // Certs are rotated and discarded + Client *CertConfig `json:"client,omitempty"` } // CDISpec defines our specification for the CDI installation @@ -1010,7 +1013,7 @@ type CDIConfigSpec struct { // +optional DataVolumeTTLSeconds *int32 `json:"dataVolumeTTLSeconds,omitempty"` // TLSSecurityProfile is used by operators to apply cluster-wide TLS security settings to operands.
- TLSSecurityProfile *ocpconfigv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` // The imagePullSecrets used to pull the container images ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` // LogVerbosity overrides the default verbosity level used to initialize loggers @@ -1022,6 +1025,8 @@ type CDIConfigSpec struct { type CDIConfigStatus struct { // The calculated upload proxy URL UploadProxyURL *string `json:"uploadProxyURL,omitempty"` + // UploadProxyCA is the certificate authority of the upload proxy + UploadProxyCA *string `json:"uploadProxyCA,omitempty"` // ImportProxy contains importer pod proxy configuration. // +optional ImportProxy *ImportProxy `json:"importProxy,omitempty"` diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go index 49246a91..fb5ab9c6 100644 --- a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go +++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go @@ -420,6 +420,7 @@ func (CDICertConfig) SwaggerDoc() map[string]string { "": "CDICertConfig has the CertConfigs for CDI", "ca": "CA configuration\nCA certs are kept in the CA bundle as long as they are valid", "server": "Server configuration\nCerts are rotated and discarded", + "client": "Client configuration\nCerts are rotated and discarded", } } @@ -517,6 +518,7 @@ func (CDIConfigStatus) SwaggerDoc() map[string]string { return map[string]string{ "": "CDIConfigStatus provides the most recently observed status of the CDI Config resource", "uploadProxyURL": "The calculated upload proxy URL", + "uploadProxyCA": "UploadProxyCA is the certificate authority of the upload proxy", "importProxy": "ImportProxy contains importer pod proxy configuration.\n+optional", "scratchSpaceStorageClass": "The calculated storage class to be used for scratch space", "defaultPodResourceRequirements": "ResourceRequirements describes the compute resource requirements.", diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_tlssecurityprofile.go similarity index 91% rename from vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go rename to vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_tlssecurityprofile.go index b18ef647..046ca82c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go +++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_tlssecurityprofile.go @@ -1,4 +1,6 @@ -package v1 +package v1beta1 + +// following copied from github.com/openshift/api/config/v1 // TLSSecurityProfile defines the schema for a TLS security profile. This object // is used by operators to apply TLS security settings to operands. 
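A minimal usage sketch (not part of the vendored diff): with the profile types copied into v1beta1, CDI consumers can configure TLS without importing github.com/openshift/api/config/v1. The Type discriminator field is assumed here from the upstream OpenShift type this file was copied from.

package main

import (
	"fmt"

	cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

func main() {
	// Select the Intermediate profile; Type is assumed to mirror the OpenShift
	// config/v1 union discriminator.
	spec := cdiv1beta1.CDIConfigSpec{
		TLSSecurityProfile: &cdiv1beta1.TLSSecurityProfile{
			Type:         cdiv1beta1.TLSProfileIntermediateType,
			Intermediate: &cdiv1beta1.IntermediateTLSProfile{},
		},
	}

	// TLSProfiles (vendored further below) maps each profile name to its
	// cipher suites and minimum TLS version.
	profile := cdiv1beta1.TLSProfiles[cdiv1beta1.TLSProfileIntermediateType]
	fmt.Println(profile.MinTLSVersion, len(profile.Ciphers))
	_ = spec
}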
@@ -27,65 +29,35 @@ type TLSSecurityProfile struct { // and looks like this (yaml): // // ciphers: - // // - TLS_AES_128_GCM_SHA256 - // // - TLS_AES_256_GCM_SHA384 - // // - TLS_CHACHA20_POLY1305_SHA256 - // // - ECDHE-ECDSA-AES128-GCM-SHA256 - // // - ECDHE-RSA-AES128-GCM-SHA256 - // // - ECDHE-ECDSA-AES256-GCM-SHA384 - // // - ECDHE-RSA-AES256-GCM-SHA384 - // // - ECDHE-ECDSA-CHACHA20-POLY1305 - // // - ECDHE-RSA-CHACHA20-POLY1305 - // // - DHE-RSA-AES128-GCM-SHA256 - // // - DHE-RSA-AES256-GCM-SHA384 - // // - DHE-RSA-CHACHA20-POLY1305 - // // - ECDHE-ECDSA-AES128-SHA256 - // // - ECDHE-RSA-AES128-SHA256 - // // - ECDHE-ECDSA-AES128-SHA - // // - ECDHE-RSA-AES128-SHA - // // - ECDHE-ECDSA-AES256-SHA384 - // // - ECDHE-RSA-AES256-SHA384 - // // - ECDHE-ECDSA-AES256-SHA - // // - ECDHE-RSA-AES256-SHA - // // - DHE-RSA-AES128-SHA256 - // // - DHE-RSA-AES256-SHA256 - // // - AES128-GCM-SHA256 - // // - AES256-GCM-SHA384 - // // - AES128-SHA256 - // // - AES256-SHA256 - // // - AES128-SHA - // // - AES256-SHA - // // - DES-CBC3-SHA - // // minTLSVersion: VersionTLS10 // // +optional @@ -98,29 +70,17 @@ type TLSSecurityProfile struct { // and looks like this (yaml): // // ciphers: - // // - TLS_AES_128_GCM_SHA256 - // // - TLS_AES_256_GCM_SHA384 - // // - TLS_CHACHA20_POLY1305_SHA256 - // // - ECDHE-ECDSA-AES128-GCM-SHA256 - // // - ECDHE-RSA-AES128-GCM-SHA256 - // // - ECDHE-ECDSA-AES256-GCM-SHA384 - // // - ECDHE-RSA-AES256-GCM-SHA384 - // // - ECDHE-ECDSA-CHACHA20-POLY1305 - // // - ECDHE-RSA-CHACHA20-POLY1305 - // // - DHE-RSA-AES128-GCM-SHA256 - // // - DHE-RSA-AES256-GCM-SHA384 - // // minTLSVersion: VersionTLS12 // // +optional @@ -133,15 +93,13 @@ type TLSSecurityProfile struct { // and looks like this (yaml): // // ciphers: - // // - TLS_AES_128_GCM_SHA256 - // // - TLS_AES_256_GCM_SHA384 - // // - TLS_CHACHA20_POLY1305_SHA256 - // // minTLSVersion: VersionTLS13 // + // NOTE: Currently unsupported. + // // +optional // +nullable Modern *ModernTLSProfile `json:"modern,omitempty"` @@ -150,15 +108,10 @@ type TLSSecurityProfile struct { // looks like this: // // ciphers: - // // - ECDHE-ECDSA-CHACHA20-POLY1305 - // // - ECDHE-RSA-CHACHA20-POLY1305 - // // - ECDHE-RSA-AES128-GCM-SHA256 - // // - ECDHE-ECDSA-AES128-GCM-SHA256 - // // minTLSVersion: VersionTLS11 // // +optional @@ -189,16 +142,16 @@ type CustomTLSProfile struct { type TLSProfileType string const ( - // Old is a TLS security profile based on: + // TLSProfileOldType is a TLS security profile based on: // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility TLSProfileOldType TLSProfileType = "Old" - // Intermediate is a TLS security profile based on: + // TLSProfileIntermediateType is a TLS security profile based on: // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 TLSProfileIntermediateType TLSProfileType = "Intermediate" - // Modern is a TLS security profile based on: + // TLSProfileModernType is a TLS security profile based on: // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility TLSProfileModernType TLSProfileType = "Modern" - // Custom is a TLS security profile that allows for user-defined parameters. + // TLSProfileCustomType is a TLS security profile that allows for user-defined parameters. 
TLSProfileCustomType TLSProfileType = "Custom" ) @@ -211,7 +164,6 @@ type TLSProfileSpec struct { // ciphers: // - DES-CBC3-SHA // - // +listType=atomic Ciphers []string `json:"ciphers"` // minTLSVersion is used to specify the minimal version of the TLS protocol // that is negotiated during the TLS handshake. For example, to use TLS @@ -235,13 +187,13 @@ type TLSProfileSpec struct { type TLSProtocolVersion string const ( - // VersionTLSv10 is version 1.0 of the TLS security protocol. + // VersionTLS10 is version 1.0 of the TLS security protocol. VersionTLS10 TLSProtocolVersion = "VersionTLS10" - // VersionTLSv11 is version 1.1 of the TLS security protocol. + // VersionTLS11 is version 1.1 of the TLS security protocol. VersionTLS11 TLSProtocolVersion = "VersionTLS11" - // VersionTLSv12 is version 1.2 of the TLS security protocol. + // VersionTLS12 is version 1.2 of the TLS security protocol. VersionTLS12 TLSProtocolVersion = "VersionTLS12" - // VersionTLSv13 is version 1.3 of the TLS security protocol. + // VersionTLS13 is version 1.3 of the TLS security protocol. VersionTLS13 TLSProtocolVersion = "VersionTLS13" ) @@ -249,7 +201,7 @@ const ( // // NOTE: The caller needs to make sure to check that these constants are valid for their binary. Not all // entries map to values for all binaries. In the case of ties, the kube-apiserver wins. Do not fail, -// just be sure to whitelist only and everything will be ok. +// just be sure to allowlist only and everything will be ok. var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ TLSProfileOldType: { Ciphers: []string{ diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go index 30b665b7..1fbe1268 100644 --- a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go +++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go @@ -22,7 +22,6 @@ limitations under the License. package v1beta1 import ( - configv1 "github.com/openshift/api/config/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -69,6 +68,11 @@ func (in *CDICertConfig) DeepCopyInto(out *CDICertConfig) { *out = new(CertConfig) (*in).DeepCopyInto(*out) } + if in.Client != nil { + in, out := &in.Client, &out.Client + *out = new(CertConfig) + (*in).DeepCopyInto(*out) + } return } @@ -193,7 +197,7 @@ func (in *CDIConfigSpec) DeepCopyInto(out *CDIConfigSpec) { } if in.TLSSecurityProfile != nil { in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile - *out = new(configv1.TLSSecurityProfile) + *out = new(TLSSecurityProfile) (*in).DeepCopyInto(*out) } if in.ImagePullSecrets != nil { @@ -227,6 +231,11 @@ func (in *CDIConfigStatus) DeepCopyInto(out *CDIConfigStatus) { *out = new(string) **out = **in } + if in.UploadProxyCA != nil { + in, out := &in.UploadProxyCA, &out.UploadProxyCA + *out = new(string) + **out = **in + } if in.ImportProxy != nil { in, out := &in.ImportProxy, &out.ImportProxy *out = new(ImportProxy) @@ -456,6 +465,23 @@ func (in *ConditionState) DeepCopy() *ConditionState { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) { + *out = *in + in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile. +func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile { + if in == nil { + return nil + } + out := new(CustomTLSProfile) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomizeComponents) DeepCopyInto(out *CustomizeComponents) { *out = *in @@ -1408,6 +1434,38 @@ func (in *ImportStatus) DeepCopy() *ImportStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateTLSProfile. +func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile { + if in == nil { + return nil + } + out := new(IntermediateTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModernTLSProfile) DeepCopyInto(out *ModernTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModernTLSProfile. +func (in *ModernTLSProfile) DeepCopy() *ModernTLSProfile { + if in == nil { + return nil + } + out := new(ModernTLSProfile) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObjectTransfer) DeepCopyInto(out *ObjectTransfer) { *out = *in @@ -1540,6 +1598,22 @@ func (in *ObjectTransferStatus) DeepCopy() *ObjectTransferStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OldTLSProfile) DeepCopyInto(out *OldTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OldTLSProfile. +func (in *OldTLSProfile) DeepCopy() *OldTLSProfile { + if in == nil { + return nil + } + out := new(OldTLSProfile) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageProfile) DeepCopyInto(out *StorageProfile) { *out = *in @@ -1734,6 +1808,63 @@ func (in *StorageSpec) DeepCopy() *StorageSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) { + *out = *in + if in.Ciphers != nil { + in, out := &in.Ciphers, &out.Ciphers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec. +func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec { + if in == nil { + return nil + } + out := new(TLSProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSSecurityProfile) DeepCopyInto(out *TLSSecurityProfile) { + *out = *in + if in.Old != nil { + in, out := &in.Old, &out.Old + *out = new(OldTLSProfile) + **out = **in + } + if in.Intermediate != nil { + in, out := &in.Intermediate, &out.Intermediate + *out = new(IntermediateTLSProfile) + **out = **in + } + if in.Modern != nil { + in, out := &in.Modern, &out.Modern + *out = new(ModernTLSProfile) + **out = **in + } + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = new(CustomTLSProfile) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSecurityProfile. +func (in *TLSSecurityProfile) DeepCopy() *TLSSecurityProfile { + if in == nil { + return nil + } + out := new(TLSSecurityProfile) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TransferSource) DeepCopyInto(out *TransferSource) { *out = *in diff --git a/vendor/modules.txt b/vendor/modules.txt index d1992ab6..d6dac609 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -8,9 +8,13 @@ github.com/emicklei/go-restful/v3/log # github.com/evanphx/json-patch v5.9.0+incompatible ## explicit github.com/evanphx/json-patch -# github.com/fsnotify/fsnotify v1.7.0 +# github.com/fsnotify/fsnotify v1.8.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify +github.com/fsnotify/fsnotify/internal +# github.com/fxamacker/cbor/v2 v2.7.0 +## explicit; go 1.17 +github.com/fxamacker/cbor/v2 # github.com/go-kit/kit v0.13.0 ## explicit; go 1.17 github.com/go-kit/kit/log @@ -41,10 +45,10 @@ github.com/go-task/slim-sprig/v3 ## explicit; go 1.15 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang-jwt/jwt/v4 v4.5.0 +# github.com/golang-jwt/jwt/v4 v4.5.1 ## explicit; go 1.16 github.com/golang-jwt/jwt/v4 -# github.com/golang/glog v1.2.2 +# github.com/golang/glog v1.2.3 ## explicit; go 1.19 github.com/golang/glog github.com/golang/glog/internal/logsink @@ -73,8 +77,8 @@ github.com/google/go-cmp/cmp/internal/value ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 -## explicit; go 1.19 +# github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db +## explicit; go 1.22 github.com/google/pprof/profile # github.com/google/uuid v1.6.0 ## explicit @@ -91,7 +95,7 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.1 +# github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.5 ## explicit; go 1.21 github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1 @@ -119,8 +123,8 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.19.1 -## explicit; go 1.20 +# github.com/onsi/ginkgo/v2 v2.21.0 +## explicit; 
go 1.22.0 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config github.com/onsi/ginkgo/v2/formatter @@ -141,8 +145,8 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.34.1 -## explicit; go 1.20 +# github.com/onsi/gomega v1.35.1 +## explicit; go 1.22 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal @@ -155,7 +159,6 @@ github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types # github.com/openshift/api v0.0.0-20240717221938-8da8de571496 ## explicit; go 1.22.0 -github.com/openshift/api/config/v1 github.com/openshift/api/route/v1 github.com/openshift/api/security/v1 # github.com/openshift/client-go v0.0.0-20240528061634-b054aa794d87 @@ -174,18 +177,17 @@ github.com/openshift/custom-resource-status/conditions/v1 # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 +# github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2 ## explicit; go 1.22.0 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1 # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 -## explicit; go 1.20 -golang.org/x/exp/constraints -golang.org/x/exp/slices -# golang.org/x/net v0.28.0 +# github.com/x448/float16 v0.8.4 +## explicit; go 1.11 +github.com/x448/float16 +# golang.org/x/net v0.31.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -198,19 +200,19 @@ golang.org/x/net/idna golang.org/x/net/internal/socks golang.org/x/net/proxy golang.org/x/net/websocket -# golang.org/x/oauth2 v0.22.0 +# golang.org/x/oauth2 v0.24.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sys v0.23.0 +# golang.org/x/sys v0.27.0 ## explicit; go 1.18 golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.23.0 +# golang.org/x/term v0.26.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.17.0 +# golang.org/x/text v0.20.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -232,15 +234,15 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.6.0 +# golang.org/x/time v0.8.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.24.0 -## explicit; go 1.19 +# golang.org/x/tools v0.27.0 +## explicit; go 1.22.0 golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector -# google.golang.org/protobuf v1.34.2 -## explicit; go 1.20 +# google.golang.org/protobuf v1.35.1 +## explicit; go 1.21 google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt @@ -348,7 +350,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.30.3 +# k8s.io/apimachinery v0.31.2 ## explicit; go 1.22.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -367,6 +369,8 @@ k8s.io/apimachinery/pkg/labels k8s.io/apimachinery/pkg/runtime k8s.io/apimachinery/pkg/runtime/schema k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct +k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/runtime/serializer/protobuf k8s.io/apimachinery/pkg/runtime/serializer/recognizer @@ -619,7 +623,7 @@ k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 +# k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -630,7 +634,7 @@ k8s.io/utils/pointer k8s.io/utils/ptr k8s.io/utils/strings/slices k8s.io/utils/trace -# kubevirt.io/api v1.3.0 +# kubevirt.io/api v1.3.1 ## explicit; go 1.22.0 kubevirt.io/api/clone kubevirt.io/api/clone/v1alpha1 @@ -650,7 +654,7 @@ kubevirt.io/api/pool/v1alpha1 kubevirt.io/api/snapshot kubevirt.io/api/snapshot/v1alpha1 kubevirt.io/api/snapshot/v1beta1 -# kubevirt.io/client-go v1.3.0 +# kubevirt.io/client-go v1.3.1 ## explicit; go 1.22.0 kubevirt.io/client-go/generated/containerized-data-importer/clientset/versioned kubevirt.io/client-go/generated/containerized-data-importer/clientset/versioned/scheme @@ -683,8 +687,8 @@ kubevirt.io/client-go/log kubevirt.io/client-go/subresources kubevirt.io/client-go/util kubevirt.io/client-go/version -# kubevirt.io/containerized-data-importer-api v1.59.0 -## explicit; go 1.21 +# kubevirt.io/containerized-data-importer-api v1.60.3 +## explicit; go 1.22.3 kubevirt.io/containerized-data-importer-api/pkg/apis/core kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1 kubevirt.io/containerized-data-importer-api/pkg/apis/upload
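A sketch of the jsonFrameReader.Read contract that the framer.go hunk above tightens: when the caller's buffer is smaller than a frame, Read fills the buffer, returns io.ErrShortBuffer, and hands out the remainder on subsequent calls. The stream content here is illustrative.

package main

import (
	"bytes"
	"fmt"
	"io"

	"k8s.io/apimachinery/pkg/util/framer"
)

func main() {
	// Two JSON documents on one stream; the framed reader returns one frame per Read.
	stream := io.NopCloser(bytes.NewBufferString(`{"a":1}{"b":"a-longer-value"}`))
	r := framer.NewJSONFramedReader(stream)

	buf := make([]byte, 8) // deliberately smaller than the second frame
	for {
		n, err := r.Read(buf)
		switch {
		case err == io.ErrShortBuffer:
			fmt.Printf("partial: %q\n", buf[:n]) // the next Read continues the same frame
		case err != nil:
			return // io.EOF once the stream is exhausted
		default:
			fmt.Printf("frame: %q\n", buf[:n])
		}
	}
}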
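The intstr.go hunk wires IntOrString into the new CBOR machinery through the cbor/direct package it imports; a roundtrip sketch:

package main

import (
	"fmt"

	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// MarshalCBOR encodes whichever arm is set; UnmarshalCBOR tries the string
	// arm first and falls back to the int arm, mirroring the JSON methods.
	in := intstr.FromString("25%")
	data, err := cbor.Marshal(in)
	if err != nil {
		panic(err)
	}

	var out intstr.IntOrString
	if err := cbor.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // 25%
}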
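The runtime.go changes replace the context-free handler hooks with context-aware variants; a minimal adoption sketch follows (the message and key/value pairs are illustrative, and ReallyCrash is disabled only so the sketch survives the recovered panic):

package main

import (
	"context"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func main() {
	ctx := context.Background()
	utilruntime.ReallyCrash = false // keep the sketch from re-panicking

	func() {
		// Additional handlers now receive the context along with the recovered
		// value; they run after the built-in logging panic handler.
		defer utilruntime.HandleCrashWithContext(ctx, func(_ context.Context, r interface{}) {
			_ = r // e.g. release resources or signal shutdown here
		})
		panic("boom")
	}()

	// Unlike HandleError, a nil error still produces a log entry; extra detail
	// belongs in the message and key/value pairs rather than a wrapped error.
	utilruntime.HandleErrorWithContext(ctx, nil, "reconcile failed", "retries", 3)
}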
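MockWatcher, added to pkg/watch above, stubs the two Interface methods with plain funcs, which is handy in tests:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	events := make(chan watch.Event, 1)
	events <- watch.Event{Type: watch.Added}
	close(events)

	// Satisfies watch.Interface without a real event producer behind it.
	var w watch.Interface = watch.MockWatcher{
		StopFunc:       func() {},
		ResultChanFunc: func() <-chan watch.Event { return events },
	}
	defer w.Stop()

	for ev := range w.ResultChan() {
		fmt.Println(ev.Type) // ADDED
	}
}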
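And a sketch of the new k8s.io/utils/net MultiListen helper (addresses and ports are illustrative):

package main

import (
	"context"
	"fmt"

	netutils "k8s.io/utils/net"
)

func main() {
	// One net.Listener accepting on both loopback addresses; Accept()
	// multiplexes connections from all sub-listeners over a single channel.
	ln, err := netutils.MultiListen(context.Background(), "tcp", "127.0.0.1:9090", "[::1]:9090")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	fmt.Println("listening on", ln.Addr()) // address of the first sub-listener
	for {
		conn, err := ln.Accept()
		if err != nil {
			return // "use of closed network connection" after Close()
		}
		_ = conn.Close() // a real server would handle the connection here
	}
}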