From 8476837661f12117dbf5b9ab290c81e2fa8a6edb Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 16 Feb 2024 01:52:48 +0000
Subject: [PATCH] fix(deps): update all go dependencies main

Signed-off-by: renovate[bot]
---
 go.mod | 20 +- go.sum | 156 +- vendor/github.com/armon/go-metrics/.gitignore | 26 + .../github.com/armon/go-metrics/.travis.yml | 13 + vendor/github.com/armon/go-metrics/LICENSE | 20 + vendor/github.com/armon/go-metrics/README.md | 91 + .../github.com/armon/go-metrics/const_unix.go | 12 + .../armon/go-metrics/const_windows.go | 13 + vendor/github.com/armon/go-metrics/inmem.go | 339 + .../armon/go-metrics/inmem_endpoint.go | 162 + .../armon/go-metrics/inmem_signal.go | 117 + vendor/github.com/armon/go-metrics/metrics.go | 299 + vendor/github.com/armon/go-metrics/sink.go | 132 + vendor/github.com/armon/go-metrics/start.go | 158 + vendor/github.com/armon/go-metrics/statsd.go | 184 + .../github.com/armon/go-metrics/statsite.go | 172 + vendor/github.com/cilium/cilium/AUTHORS | 8 +- .../cilium/cilium/daemon/k8s/init.go | 170 + .../cilium/cilium/daemon/k8s/resources.go | 117 + .../cilium/cilium/pkg/allocator/allocator.go | 1069 + .../cilium/cilium/pkg/allocator/cache.go | 381 + .../cilium/cilium/pkg/allocator/doc.go | 5 + .../cilium/cilium/pkg/allocator/localkeys.go | 156 + .../cilium/cilium/pkg/allocator/logfields.go | 10 + .../cilium/cilium/pkg/annotation/k8s.go | 159 + .../cilium/cilium/pkg/backoff/backoff.go | 203 + .../cilium/pkg/bgpv1/agent/annotations.go | 198 + .../cilium/pkg/bgpv1/agent/controller.go | 344 + .../cilium/cilium/pkg/bgpv1/agent/mock.go | 18 + .../cilium/pkg/bgpv1/agent/routermgr.go | 53 + .../pkg/bgpv1/agent/signaler/signaler.go | 39 + .../cilium/cilium/pkg/bgpv1/api/get_peer.go | 40 + .../pkg/bgpv1/api/get_route_policies.go | 37 + .../cilium/cilium/pkg/bgpv1/api/get_routes.go | 36 + .../github.com/cilium/cilium/pkg/bpf/bpf.go | 54 + .../cilium/cilium/pkg/bpf/bpf_linux.go | 168 + .../cilium/cilium/pkg/bpf/bpffs_linux.go | 291 + .../cilium/cilium/pkg/bpf/bpffs_migrate.go | 185 + .../cilium/cilium/pkg/bpf/bpfmap.go | 24 + .../cilium/cilium/pkg/bpf/collection.go | 334 + .../github.com/cilium/cilium/pkg/bpf/doc.go | 7 + .../cilium/cilium/pkg/bpf/endpoint.go | 71 + .../cilium/cilium/pkg/bpf/events.go | 286 + .../github.com/cilium/cilium/pkg/bpf/link.go | 47 + .../github.com/cilium/cilium/pkg/bpf/map.go | 77 + .../cilium/cilium/pkg/bpf/map_linux.go | 1206 + .../cilium/pkg/bpf/map_register_linux.go | 69 + .../cilium/cilium/pkg/bpf/metrics.go | 12 + .../cilium/cilium/pkg/bpf/stats_linux.go | 65 + .../cilium/cilium/pkg/byteorder/byteorder.go | 22 + .../pkg/byteorder/byteorder_bigendian.go | 17 + .../pkg/byteorder/byteorder_littleendian.go | 20 + .../cilium/cilium/pkg/byteorder/doc.go | 5 + .../cilium/cilium/pkg/common/const.go | 15 + .../cilium/cilium/pkg/common/utils.go | 137 + .../cilium/pkg/container/ring_buffer.go | 152 + .../cilium/cilium/pkg/controller/cell.go | 74 + .../cilium/pkg/controller/controller.go | 405 + .../cilium/cilium/pkg/controller/doc.go | 6 + .../cilium/cilium/pkg/controller/logfields.go | 26 + .../cilium/cilium/pkg/controller/manager.go | 374 + .../cilium/cilium/pkg/counter/counter.go | 41 + .../cilium/cilium/pkg/counter/doc.go | 5 + .../cilium/cilium/pkg/counter/integer.go | 40 + .../cilium/cilium/pkg/counter/prefixes.go | 171 + 
.../certificatemanager/certificate_manager.go | 177 + .../pkg/datapath/linux/bandwidth/bandwidth.go | 303 + .../pkg/datapath/linux/bandwidth/cell.go | 72 + .../pkg/datapath/linux/bandwidth/doc.go | 5 + .../pkg/datapath/linux/bandwidth/types.go | 10 + .../datapath/linux/config/defines/defines.go | 52 + .../pkg/datapath/loader/metrics/metrics.go | 27 + .../cilium/cilium/pkg/datapath/tunnel/cell.go | 53 + .../cilium/pkg/datapath/tunnel/tunnel.go | 221 + .../cilium/pkg/datapath/types/config.go | 130 + .../cilium/pkg/datapath/types/datapath.go | 39 + .../cilium/pkg/datapath/types/endpoint.go | 15 + .../cilium/cilium/pkg/datapath/types/ipsec.go | 10 + .../cilium/cilium/pkg/datapath/types/lbmap.go | 109 + .../cilium/pkg/datapath/types/loader.go | 82 + .../cilium/cilium/pkg/datapath/types/node.go | 153 + .../pkg/datapath/types/node_addressing.go | 44 + .../cilium/pkg/datapath/types/wireguard.go | 17 + .../cilium/cilium/pkg/debug/subsystem.go | 94 + .../github.com/cilium/cilium/pkg/ebpf/doc.go | 7 + .../github.com/cilium/cilium/pkg/ebpf/ebpf.go | 13 + .../github.com/cilium/cilium/pkg/ebpf/map.go | 208 + .../cilium/cilium/pkg/ebpf/map_register.go | 57 + .../cilium/pkg/endpoint/regeneration/owner.go | 67 + .../regeneration/regeneration_context.go | 63 + .../cilium/cilium/pkg/eventqueue/doc.go | 6 + .../cilium/pkg/eventqueue/eventqueue.go | 314 + .../cilium/cilium/pkg/fqdn/restore/restore.go | 74 + .../cilium/cilium/pkg/hive/cell/lifecycle.go | 3 + .../cilium/cilium/pkg/hive/job/job.go | 623 + .../cilium/cilium/pkg/hive/job/metrics.go | 49 + .../cilium/pkg/identity/cache/allocator.go | 575 + .../cilium/cilium/pkg/identity/cache/cache.go | 280 + .../cilium/cilium/pkg/identity/cache/local.go | 265 + .../pkg/identity/identitymanager/doc.go | 6 + .../pkg/identity/identitymanager/log.go | 13 + .../pkg/identity/identitymanager/manager.go | 216 + .../pkg/identity/identitymanager/observer.go | 21 + .../pkg/identity/key/global_identity.go | 72 + .../cilium/pkg/identity/model/identity.go | 44 + .../cilium/cilium/pkg/idpool/idpool.go | 239 + .../cilium/pkg/ipcache/types/entries.go | 35 + .../cilium/cilium/pkg/ipcache/types/types.go | 109 + .../cilium/cilium/pkg/k8s/annotate.go | 127 + .../cilium/cilium/pkg/k8s/cache_status.go | 23 + .../cilium/cilium/pkg/k8s/cilium_node.go | 15 + .../cilium/cilium/pkg/k8s/client/cell.go | 510 + .../versioned/fake/clientset_generated.go | 79 + .../client/clientset/versioned/fake/doc.go | 7 + .../clientset/versioned/fake/register.go | 45 + .../versioned/typed/cilium.io/v2/fake/doc.go | 7 + .../v2/fake/fake_cilium.io_client.go | 63 + .../fake/fake_ciliumclusterwideenvoyconfig.go | 108 + .../fake_ciliumclusterwidenetworkpolicy.go | 119 + .../v2/fake/fake_ciliumegressgatewaypolicy.go | 108 + .../cilium.io/v2/fake/fake_ciliumendpoint.go | 128 + .../v2/fake/fake_ciliumenvoyconfig.go | 116 + .../v2/fake/fake_ciliumexternalworkload.go | 119 + .../cilium.io/v2/fake/fake_ciliumidentity.go | 108 + .../v2/fake/fake_ciliumlocalredirectpolicy.go | 128 + .../v2/fake/fake_ciliumnetworkpolicy.go | 128 + .../cilium.io/v2/fake/fake_ciliumnode.go | 119 + .../typed/cilium.io/v2alpha1/fake/doc.go | 7 + .../v2alpha1/fake/fake_cilium.io_client.go | 71 + .../fake/fake_ciliumbgpadvertisement.go | 108 + .../fake/fake_ciliumbgpclusterconfig.go | 108 + .../v2alpha1/fake/fake_ciliumbgpnodeconfig.go | 119 + .../fake/fake_ciliumbgpnodeconfigoverride.go | 108 + .../v2alpha1/fake/fake_ciliumbgppeerconfig.go | 108 + .../fake/fake_ciliumbgppeeringpolicy.go | 108 + 
.../v2alpha1/fake/fake_ciliumcidrgroup.go | 108 + .../v2alpha1/fake/fake_ciliumendpointslice.go | 108 + .../fake/fake_ciliuml2announcementpolicy.go | 119 + .../fake/fake_ciliumloadbalancerippool.go | 119 + .../v2alpha1/fake/fake_ciliumnodeconfig.go | 116 + .../v2alpha1/fake/fake_ciliumpodippool.go | 108 + .../cilium/cilium/pkg/k8s/client/config.go | 69 + .../cilium/cilium/pkg/k8s/client/getters.go | 57 + .../cilium/cilium/pkg/k8s/constants/const.go | 10 + .../github.com/cilium/cilium/pkg/k8s/doc.go | 6 + .../cilium/cilium/pkg/k8s/endpoints.go | 497 + .../cilium/cilium/pkg/k8s/error_helpers.go | 86 + .../cilium/pkg/k8s/factory_functions.go | 551 + .../pkg/k8s/identitybackend/identity.go | 416 + .../cilium/pkg/k8s/informer/informer.go | 148 + .../cilium/cilium/pkg/k8s/json_patch.go | 17 + .../cilium/cilium/pkg/k8s/labels.go | 132 + .../cilium/cilium/pkg/k8s/logfields.go | 20 + .../cilium/cilium/pkg/k8s/metrics/metrics.go | 38 + .../cilium/cilium/pkg/k8s/network_policy.go | 334 + .../github.com/cilium/cilium/pkg/k8s/node.go | 248 + .../cilium/cilium/pkg/k8s/resource/error.go | 39 + .../cilium/cilium/pkg/k8s/resource/event.go | 31 + .../cilium/cilium/pkg/k8s/resource/key.go | 41 + .../cilium/pkg/k8s/resource/resource.go | 912 + .../cilium/cilium/pkg/k8s/resource/scheme.go | 33 + .../cilium/cilium/pkg/k8s/resource/store.go | 123 + .../cilium/cilium/pkg/k8s/resource_ctors.go | 383 + .../cilium/cilium/pkg/k8s/rule_translate.go | 269 + .../cilium/cilium/pkg/k8s/service.go | 730 + .../cilium/cilium/pkg/k8s/service_cache.go | 841 + .../pkg/k8s/slim/k8s/api/discovery/v1/doc.go | 11 + .../slim/k8s/api/discovery/v1/generated.pb.go | 2092 ++ .../slim/k8s/api/discovery/v1/generated.proto | 161 + .../k8s/slim/k8s/api/discovery/v1/register.go | 45 + .../k8s/slim/k8s/api/discovery/v1/types.go | 176 + .../k8s/api/discovery/v1/well_known_labels.go | 21 + .../api/discovery/v1/zz_generated.deepcopy.go | 230 + .../discovery/v1/zz_generated.deepequal.go | 284 + .../k8s/slim/k8s/api/discovery/v1beta1/doc.go | 12 + .../k8s/api/discovery/v1beta1/generated.pb.go | 1992 ++ .../k8s/api/discovery/v1beta1/generated.proto | 156 + .../k8s/api/discovery/v1beta1/register.go | 45 + .../slim/k8s/api/discovery/v1beta1/types.go | 180 + .../discovery/v1beta1/well_known_labels.go | 21 + .../v1beta1/zz_generated.deepcopy.go | 220 + .../v1beta1/zz_generated.deepequal.go | 268 + .../pkg/k8s/slim/k8s/api/networking/v1/doc.go | 11 + .../k8s/api/networking/v1/generated.pb.go | 2195 ++ .../k8s/api/networking/v1/generated.proto | 191 + .../slim/k8s/api/networking/v1/register.go | 42 + .../k8s/slim/k8s/api/networking/v1/types.go | 209 + .../networking/v1/well_known_annotations.go | 14 + .../networking/v1/zz_generated.deepcopy.go | 255 + .../networking/v1/zz_generated.deepequal.go | 317 + .../clientset/versioned/scheme/doc.go | 7 + .../clientset/versioned/scheme/register.go | 43 + .../apiextensions/v1/apiextensions_client.go | 94 + .../v1/customresourcedefinition.go | 155 + .../versioned/typed/apiextensions/v1/doc.go | 7 + .../apiextensions/v1/generated_expansion.go | 8 + .../k8s/apiextensions-clientset/clientset.go | 89 + .../k8s/slim/k8s/apis/apiextensions/v1/doc.go | 14 + .../k8s/apis/apiextensions/v1/generated.pb.go | 568 + .../k8s/apis/apiextensions/v1/generated.proto | 37 + .../k8s/apis/apiextensions/v1/register.go | 43 + .../slim/k8s/apis/apiextensions/v1/types.go | 39 + .../apiextensions/v1/zz_generated.deepcopy.go | 72 + .../pkg/k8s/slim/k8s/apis/meta/v1beta1/doc.go | 12 + .../k8s/apis/meta/v1beta1/generated.pb.go | 401 +
.../k8s/apis/meta/v1beta1/generated.proto | 28 + .../slim/k8s/apis/meta/v1beta1/register.go | 32 + .../k8s/slim/k8s/apis/meta/v1beta1/types.go | 33 + .../meta/v1beta1/zz_generated.deepcopy.go | 47 + .../pkg/k8s/slim/k8s/apis/util/intstr/doc.go | 8 + .../slim/k8s/apis/util/intstr/generated.pb.go | 356 + .../slim/k8s/apis/util/intstr/generated.proto | 30 + .../k8s/slim/k8s/apis/util/intstr/intstr.go | 221 + .../util/intstr/zz_generated.deepequal.go | 29 + .../client/clientset/versioned/clientset.go | 146 + .../versioned/fake/clientset_generated.go | 93 + .../client/clientset/versioned/fake/doc.go | 7 + .../clientset/versioned/fake/register.go | 49 + .../client/clientset/versioned/scheme/doc.go | 7 + .../clientset/versioned/scheme/register.go | 49 + .../versioned/typed/core/v1/core_client.go | 119 + .../clientset/versioned/typed/core/v1/doc.go | 7 + .../versioned/typed/core/v1/endpoints.go | 165 + .../versioned/typed/core/v1/fake/doc.go | 7 + .../typed/core/v1/fake/fake_core_client.go | 47 + .../typed/core/v1/fake/fake_endpoints.go | 116 + .../typed/core/v1/fake/fake_namespace.go | 100 + .../versioned/typed/core/v1/fake/fake_node.go | 119 + .../versioned/typed/core/v1/fake/fake_pod.go | 139 + .../typed/core/v1/fake/fake_secret.go | 116 + .../typed/core/v1/fake/fake_service.go | 120 + .../typed/core/v1/generated_expansion.go | 18 + .../versioned/typed/core/v1/namespace.go | 139 + .../clientset/versioned/typed/core/v1/node.go | 171 + .../clientset/versioned/typed/core/v1/pod.go | 199 + .../versioned/typed/core/v1/secret.go | 165 + .../versioned/typed/core/v1/service.go | 165 + .../typed/discovery/v1/discovery_client.go | 94 + .../versioned/typed/discovery/v1/doc.go | 7 + .../typed/discovery/v1/endpointslice.go | 165 + .../versioned/typed/discovery/v1/fake/doc.go | 7 + .../v1/fake/fake_discovery_client.go | 27 + .../discovery/v1/fake/fake_endpointslice.go | 116 + .../typed/discovery/v1/generated_expansion.go | 8 + .../discovery/v1beta1/discovery_client.go | 94 + .../versioned/typed/discovery/v1beta1/doc.go | 7 + .../typed/discovery/v1beta1/endpointslice.go | 165 + .../typed/discovery/v1beta1/fake/doc.go | 7 + .../v1beta1/fake/fake_discovery_client.go | 27 + .../v1beta1/fake/fake_endpointslice.go | 116 + .../discovery/v1beta1/generated_expansion.go | 8 + .../versioned/typed/networking/v1/doc.go | 7 + .../versioned/typed/networking/v1/fake/doc.go | 7 + .../v1/fake/fake_networking_client.go | 27 + .../networking/v1/fake/fake_networkpolicy.go | 116 + .../networking/v1/generated_expansion.go | 8 + .../typed/networking/v1/networking_client.go | 94 + .../typed/networking/v1/networkpolicy.go | 165 + .../cilium/cilium/pkg/k8s/types/doc.go | 9 + .../cilium/cilium/pkg/k8s/types/types.go | 68 + .../pkg/k8s/types/zz_generated.deepcopy.go | 116 + .../pkg/k8s/types/zz_generated.deepequal.go | 98 + .../cilium/cilium/pkg/k8s/version/version.go | 309 + .../pkg/k8s/watchers/resources/resources.go | 38 + .../cilium/pkg/k8s/zz_generated.deepcopy.go | 187 + .../cilium/pkg/k8s/zz_generated.deepequal.go | 297 + .../cilium/pkg/kvstore/allocator/allocator.go | 637 + .../cilium/pkg/kvstore/allocator/doc.go | 5 + .../cilium/pkg/kvstore/allocator/logfields.go | 13 + .../cilium/cilium/pkg/kvstore/backend.go | 236 + .../cilium/pkg/kvstore/backwards_compat.go | 31 + .../cilium/cilium/pkg/kvstore/cell.go | 155 + .../cilium/cilium/pkg/kvstore/client.go | 104 + .../cilium/cilium/pkg/kvstore/config.go | 103 + .../cilium/cilium/pkg/kvstore/consul.go | 790 + .../cilium/cilium/pkg/kvstore/doc.go | 6 + 
.../cilium/cilium/pkg/kvstore/dummy.go | 87 + .../cilium/cilium/pkg/kvstore/etcd.go | 1894 ++ .../cilium/cilium/pkg/kvstore/etcd_lease.go | 307 + .../cilium/cilium/pkg/kvstore/events.go | 91 + .../cilium/cilium/pkg/kvstore/kvstore.go | 73 + .../cilium/cilium/pkg/kvstore/lock.go | 172 + .../cilium/cilium/pkg/kvstore/logfields.go | 46 + .../cilium/cilium/pkg/kvstore/metrics.go | 53 + .../cilium/cilium/pkg/kvstore/store/cell.go | 45 + .../cilium/cilium/pkg/kvstore/store/doc.go | 17 + .../cilium/pkg/kvstore/store/metrics.go | 38 + .../cilium/cilium/pkg/kvstore/store/store.go | 506 + .../cilium/pkg/kvstore/store/syncstore.go | 360 + .../cilium/pkg/kvstore/store/watchstore.go | 252 + .../cilium/pkg/kvstore/store/watchstoremgr.go | 144 + .../cilium/cilium/pkg/kvstore/trace.go | 24 + .../cilium/pkg/kvstore/watcher_cache.go | 41 + .../cilium/cilium/pkg/maps/bwmap/bwmap.go | 83 + .../cilium/cilium/pkg/maps/bwmap/doc.go | 7 + .../cilium/cilium/pkg/maps/lxcmap/doc.go | 9 + .../cilium/cilium/pkg/maps/lxcmap/lxcmap.go | 239 + .../cilium/cilium/pkg/mountinfo/mountinfo.go | 115 + .../cilium/pkg/mountinfo/mountinfo_linux.go | 58 + .../pkg/mountinfo/mountinfo_unspecified.go | 24 + .../cilium/cilium/pkg/node/address.go | 631 + .../cilium/cilium/pkg/node/address_linux.go | 242 + .../cilium/cilium/pkg/node/address_other.go | 34 + .../github.com/cilium/cilium/pkg/node/doc.go | 6 + .../cilium/cilium/pkg/node/host_endpoint.go | 22 + .../github.com/cilium/cilium/pkg/node/ip.go | 14 + .../cilium/cilium/pkg/node/ip_linux.go | 52 + .../cilium/pkg/node/local_node_store.go | 159 + .../cilium/cilium/pkg/node/logfields.go | 11 + .../cilium/cilium/pkg/node/types/logfields.go | 11 + .../cilium/cilium/pkg/node/types/node.go | 659 + .../cilium/cilium/pkg/node/types/nodename.go | 68 + .../pkg/node/types/zz_generated.deepcopy.go | 138 + .../cilium/cilium/pkg/policy/cidr.go | 75 + .../cilium/cilium/pkg/policy/config.go | 71 + .../cilium/cilium/pkg/policy/distillery.go | 241 + .../cilium/cilium/pkg/policy/identifier.go | 86 + .../github.com/cilium/cilium/pkg/policy/l4.go | 1404 + .../cilium/cilium/pkg/policy/mapstate.go | 1456 + .../cilium/cilium/pkg/policy/policy.go | 118 + .../cilium/cilium/pkg/policy/proxyid.go | 49 + .../cilium/cilium/pkg/policy/repository.go | 815 + .../cilium/cilium/pkg/policy/resolve.go | 304 + .../cilium/cilium/pkg/policy/rule.go | 790 + .../cilium/cilium/pkg/policy/rules.go | 160 + .../cilium/cilium/pkg/policy/selectorcache.go | 628 + .../pkg/policy/selectorcache_selector.go | 359 + .../cilium/pkg/policy/trafficdirection/doc.go | 6 + .../trafficdirection/trafficdirection.go | 33 + .../cilium/cilium/pkg/policy/trigger.go | 90 + .../cilium/cilium/pkg/policy/utils.go | 11 + .../cilium/cilium/pkg/policy/visibility.go | 218 + .../cilium/pkg/proxy/accesslog/record.go | 303 + .../cilium/cilium/pkg/rand/safe_rand.go | 87 + .../cilium/cilium/pkg/rate/api_limiter.go | 897 + .../github.com/cilium/cilium/pkg/rate/doc.go | 8 + .../cilium/cilium/pkg/rate/limiter.go | 110 + .../cilium/cilium/pkg/rate/metrics/metrics.go | 35 + .../cilium/cilium/pkg/safeio/safeio.go | 77 + .../cilium/pkg/service/store/logfields.go | 11 + .../cilium/cilium/pkg/service/store/store.go | 216 + .../service/store/zz_generated.deepcopy.go | 93 + .../service/store/zz_generated.deepequal.go | 33 + .../cilium/cilium/pkg/sysctl/doc.go | 5 + .../cilium/cilium/pkg/sysctl/sysctl.go | 182 + .../cilium/cilium/pkg/trigger/doc.go | 6 + .../cilium/cilium/pkg/trigger/trigger.go | 221 + .../cilium/cilium/pkg/types/ipv4.go | 45 +
.../cilium/cilium/pkg/types/ipv6.go | 41 + .../cilium/cilium/pkg/types/macaddr.go | 24 + .../cilium/cilium/pkg/types/portmap.go | 219 + .../cilium/cilium/pkg/u8proto/u8proto.go | 58 + .../cilium/pkg/wireguard/types/types.go | 15 + .../cilium/proxy/go/cilium/api/accesslog.go | 19 + .../proxy/go/cilium/api/accesslog.pb.go | 860 + .../go/cilium/api/accesslog.pb.validate.go | 810 + .../proxy/go/cilium/api/bpf_metadata.pb.go | 250 + .../go/cilium/api/bpf_metadata.pb.validate.go | 151 + .../go/cilium/api/health_check_sink.pb.go | 157 + .../api/health_check_sink.pb.validate.go | 149 + .../cilium/proxy/go/cilium/api/l7policy.pb.go | 167 + .../go/cilium/api/l7policy.pb.validate.go | 139 + .../proxy/go/cilium/api/network_filter.pb.go | 183 + .../cilium/api/network_filter.pb.validate.go | 142 + .../cilium/proxy/go/cilium/api/npds.pb.go | 1772 ++ .../proxy/go/cilium/api/npds.pb.validate.go | 1971 ++ .../cilium/proxy/go/cilium/api/nphds.pb.go | 362 + .../proxy/go/cilium/api/nphds.pb.validate.go | 171 + .../proxy/go/cilium/api/tls_wrapper.pb.go | 194 + .../go/cilium/api/tls_wrapper.pb.validate.go | 241 + .../proxy/go/cilium/api/websocket.pb.go | 411 + .../go/cilium/api/websocket.pb.validate.go | 389 + vendor/github.com/coreos/go-semver/LICENSE | 202 + vendor/github.com/coreos/go-semver/NOTICE | 5 + .../coreos/go-semver/semver/semver.go | 296 + .../coreos/go-semver/semver/sort.go | 38 + .../github.com/coreos/go-systemd/v22/LICENSE | 191 + .../github.com/coreos/go-systemd/v22/NOTICE | 5 + .../coreos/go-systemd/v22/journal/journal.go | 46 + .../go-systemd/v22/journal/journal_unix.go | 267 + .../go-systemd/v22/journal/journal_windows.go | 43 + .../gogo/protobuf/gogoproto/Makefile | 37 + .../github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../gogo/protobuf/gogoproto/gogo.pb.go | 874 + .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 + .../gogo/protobuf/gogoproto/gogo.proto | 144 + .../gogo/protobuf/gogoproto/helper.go | 415 + .../protoc-gen-gogo/descriptor/Makefile | 36 + .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../descriptor/descriptor.pb.go | 2865 ++ .../descriptor/descriptor_gostring.gen.go | 752 + .../protoc-gen-gogo/descriptor/helper.go | 390 + .../hashicorp/consul/api/.copywrite.hcl | 8 + .../github.com/hashicorp/consul/api/LICENSE | 365 + .../github.com/hashicorp/consul/api/README.md | 77 + vendor/github.com/hashicorp/consul/api/acl.go | 1738 ++ .../github.com/hashicorp/consul/api/agent.go | 1446 + vendor/github.com/hashicorp/consul/api/api.go | 1277 + .../hashicorp/consul/api/catalog.go | 377 + .../hashicorp/consul/api/config_entry.go | 687 + .../consul/api/config_entry_discoverychain.go | 384 + .../consul/api/config_entry_exports.go | 82 + .../consul/api/config_entry_gateways.go | 344 + .../api/config_entry_inline_certificate.go | 46 + .../consul/api/config_entry_intentions.go | 100 + .../consul/api/config_entry_jwt_provider.go | 310 + .../hashicorp/consul/api/config_entry_mesh.go | 90 + .../consul/api/config_entry_rate_limit_ip.go | 91 + .../consul/api/config_entry_routes.go | 281 + .../consul/api/config_entry_sameness_group.go | 29 + .../consul/api/config_entry_status.go | 358 + .../hashicorp/consul/api/connect.go | 18 + .../hashicorp/consul/api/connect_ca.go | 201 + .../hashicorp/consul/api/connect_intention.go | 461 + .../hashicorp/consul/api/coordinate.go | 122 + .../github.com/hashicorp/consul/api/debug.go | 141 + .../hashicorp/consul/api/discovery_chain.go | 283 + 
.../github.com/hashicorp/consul/api/event.go | 114 + .../github.com/hashicorp/consul/api/health.go | 398 + .../hashicorp/consul/api/internal.go | 67 + vendor/github.com/hashicorp/consul/api/kv.go | 307 + .../github.com/hashicorp/consul/api/lock.go | 411 + .../hashicorp/consul/api/namespace.go | 227 + .../hashicorp/consul/api/operator.go | 14 + .../hashicorp/consul/api/operator_area.go | 209 + .../hashicorp/consul/api/operator_audit.go | 40 + .../consul/api/operator_autopilot.go | 404 + .../hashicorp/consul/api/operator_keyring.go | 110 + .../hashicorp/consul/api/operator_license.go | 134 + .../hashicorp/consul/api/operator_raft.go | 132 + .../hashicorp/consul/api/operator_segment.go | 14 + .../hashicorp/consul/api/operator_usage.go | 57 + .../hashicorp/consul/api/partition.go | 167 + .../hashicorp/consul/api/peering.go | 295 + .../hashicorp/consul/api/prepared_query.go | 269 + vendor/github.com/hashicorp/consul/api/raw.go | 27 + .../hashicorp/consul/api/semaphore.go | 533 + .../hashicorp/consul/api/session.go | 246 + .../hashicorp/consul/api/snapshot.go | 57 + .../github.com/hashicorp/consul/api/status.go | 70 + vendor/github.com/hashicorp/consul/api/txn.go | 249 + .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 + .../hashicorp/go-cleanhttp/README.md | 30 + .../hashicorp/go-cleanhttp/cleanhttp.go | 58 + .../github.com/hashicorp/go-cleanhttp/doc.go | 20 + .../hashicorp/go-cleanhttp/handlers.go | 48 + .../github.com/hashicorp/go-hclog/.gitignore | 1 + vendor/github.com/hashicorp/go-hclog/LICENSE | 19 + .../github.com/hashicorp/go-hclog/README.md | 148 + .../hashicorp/go-hclog/colorize_unix.go | 44 + .../hashicorp/go-hclog/colorize_windows.go | 41 + .../github.com/hashicorp/go-hclog/context.go | 41 + .../github.com/hashicorp/go-hclog/exclude.go | 74 + .../github.com/hashicorp/go-hclog/global.go | 67 + .../hashicorp/go-hclog/interceptlogger.go | 207 + .../hashicorp/go-hclog/intlogger.go | 918 + .../github.com/hashicorp/go-hclog/logger.go | 393 + .../hashicorp/go-hclog/nulllogger.go | 63 + .../hashicorp/go-hclog/stacktrace.go | 109 + .../github.com/hashicorp/go-hclog/stdlog.go | 113 + .../github.com/hashicorp/go-hclog/writer.go | 85 + .../hashicorp/go-immutable-radix/.gitignore | 24 + .../hashicorp/go-immutable-radix/CHANGELOG.md | 23 + .../hashicorp/go-immutable-radix/LICENSE | 363 + .../hashicorp/go-immutable-radix/README.md | 66 + .../hashicorp/go-immutable-radix/edges.go | 21 + .../hashicorp/go-immutable-radix/iradix.go | 676 + .../hashicorp/go-immutable-radix/iter.go | 205 + .../hashicorp/go-immutable-radix/node.go | 334 + .../hashicorp/go-immutable-radix/raw_iter.go | 78 + .../go-immutable-radix/reverse_iter.go | 239 + .../hashicorp/go-rootcerts/.travis.yml | 12 + .../github.com/hashicorp/go-rootcerts/LICENSE | 363 + .../hashicorp/go-rootcerts/Makefile | 8 + .../hashicorp/go-rootcerts/README.md | 44 + .../github.com/hashicorp/go-rootcerts/doc.go | 9 + .../hashicorp/go-rootcerts/rootcerts.go | 123 + .../hashicorp/go-rootcerts/rootcerts_base.go | 12 + .../go-rootcerts/rootcerts_darwin.go | 48 + .../github.com/hashicorp/golang-lru/LICENSE | 362 + .../hashicorp/golang-lru/simplelru/lru.go | 177 + .../golang-lru/simplelru/lru_interface.go | 39 + vendor/github.com/hashicorp/serf/LICENSE | 354 + .../hashicorp/serf/coordinate/client.go | 243 + .../hashicorp/serf/coordinate/config.go | 77 + 
.../hashicorp/serf/coordinate/coordinate.go | 203 + .../hashicorp/serf/coordinate/phantom.go | 187 + .../github.com/mitchellh/go-homedir/LICENSE | 21 + .../github.com/mitchellh/go-homedir/README.md | 14 + .../mitchellh/go-homedir/homedir.go | 167 + vendor/go.etcd.io/etcd/api/v3/LICENSE | 202 + .../go.etcd.io/etcd/api/v3/authpb/auth.pb.go | 1158 + .../go.etcd.io/etcd/api/v3/authpb/auth.proto | 42 + .../etcd/api/v3/etcdserverpb/etcdserver.pb.go | 1002 + .../etcd/api/v3/etcdserverpb/etcdserver.proto | 34 + .../api/v3/etcdserverpb/raft_internal.pb.go | 2673 ++ .../api/v3/etcdserverpb/raft_internal.proto | 81 + .../v3/etcdserverpb/raft_internal_stringer.go | 183 + .../etcd/api/v3/etcdserverpb/rpc.pb.go | 25862 ++++++++++++++++ .../etcd/api/v3/etcdserverpb/rpc.proto | 1199 + .../etcd/api/v3/membershippb/membership.pb.go | 1454 + .../etcd/api/v3/membershippb/membership.proto | 43 + vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go | 798 + vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto | 49 + .../etcd/api/v3/v3rpc/rpctypes/doc.go | 16 + .../etcd/api/v3/v3rpc/rpctypes/error.go | 267 + .../etcd/api/v3/v3rpc/rpctypes/md.go | 22 + .../api/v3/v3rpc/rpctypes/metadatafields.go | 20 + .../go.etcd.io/etcd/api/v3/version/version.go | 56 + vendor/go.etcd.io/etcd/client/pkg/v3/LICENSE | 202 + .../etcd/client/pkg/v3/logutil/doc.go | 16 + .../etcd/client/pkg/v3/logutil/log_level.go | 30 + .../etcd/client/pkg/v3/logutil/zap.go | 108 + .../etcd/client/pkg/v3/logutil/zap_journal.go | 93 + .../etcd/client/pkg/v3/systemd/doc.go | 16 + .../etcd/client/pkg/v3/systemd/journal.go | 29 + .../client/pkg/v3/tlsutil/cipher_suites.go | 56 + .../etcd/client/pkg/v3/tlsutil/doc.go | 16 + .../etcd/client/pkg/v3/tlsutil/tlsutil.go | 73 + .../etcd/client/pkg/v3/tlsutil/versions.go | 47 + .../etcd/client/pkg/v3/types/doc.go | 17 + .../go.etcd.io/etcd/client/pkg/v3/types/id.go | 39 + .../etcd/client/pkg/v3/types/set.go | 195 + .../etcd/client/pkg/v3/types/slice.go | 22 + .../etcd/client/pkg/v3/types/urls.go | 82 + .../etcd/client/pkg/v3/types/urlsmap.go | 107 + vendor/go.etcd.io/etcd/client/v3/LICENSE | 202 + vendor/go.etcd.io/etcd/client/v3/README.md | 92 + vendor/go.etcd.io/etcd/client/v3/auth.go | 236 + vendor/go.etcd.io/etcd/client/v3/client.go | 612 + vendor/go.etcd.io/etcd/client/v3/cluster.go | 141 + .../go.etcd.io/etcd/client/v3/compact_op.go | 51 + vendor/go.etcd.io/etcd/client/v3/compare.go | 140 + .../etcd/client/v3/concurrency/doc.go | 17 + .../etcd/client/v3/concurrency/election.go | 254 + .../etcd/client/v3/concurrency/key.go | 65 + .../etcd/client/v3/concurrency/mutex.go | 167 + .../etcd/client/v3/concurrency/session.go | 141 + .../etcd/client/v3/concurrency/stm.go | 387 + vendor/go.etcd.io/etcd/client/v3/config.go | 92 + .../etcd/client/v3/credentials/credentials.go | 131 + vendor/go.etcd.io/etcd/client/v3/ctx.go | 50 + vendor/go.etcd.io/etcd/client/v3/doc.go | 106 + .../client/v3/internal/endpoint/endpoint.go | 134 + .../client/v3/internal/resolver/resolver.go | 74 + vendor/go.etcd.io/etcd/client/v3/kv.go | 177 + vendor/go.etcd.io/etcd/client/v3/lease.go | 607 + vendor/go.etcd.io/etcd/client/v3/logger.go | 59 + .../go.etcd.io/etcd/client/v3/maintenance.go | 255 + vendor/go.etcd.io/etcd/client/v3/op.go | 583 + vendor/go.etcd.io/etcd/client/v3/options.go | 69 + vendor/go.etcd.io/etcd/client/v3/retry.go | 306 + .../etcd/client/v3/retry_interceptor.go | 433 + vendor/go.etcd.io/etcd/client/v3/sort.go | 37 + vendor/go.etcd.io/etcd/client/v3/txn.go | 150 + vendor/go.etcd.io/etcd/client/v3/utils.go | 31 + 
vendor/go.etcd.io/etcd/client/v3/watch.go | 1042 + .../go.etcd.io/etcd/client/v3/yaml/config.go | 91 + vendor/go.uber.org/zap/.codecov.yml | 17 + vendor/go.uber.org/zap/.gitignore | 32 + vendor/go.uber.org/zap/.golangci.yml | 77 + vendor/go.uber.org/zap/.readme.tmpl | 109 + vendor/go.uber.org/zap/CHANGELOG.md | 671 + vendor/go.uber.org/zap/CODE_OF_CONDUCT.md | 75 + vendor/go.uber.org/zap/CONTRIBUTING.md | 70 + vendor/go.uber.org/zap/FAQ.md | 164 + vendor/go.uber.org/zap/LICENSE.txt | 19 + vendor/go.uber.org/zap/Makefile | 76 + vendor/go.uber.org/zap/README.md | 137 + vendor/go.uber.org/zap/array.go | 447 + vendor/go.uber.org/zap/buffer/buffer.go | 146 + vendor/go.uber.org/zap/buffer/pool.go | 53 + vendor/go.uber.org/zap/checklicense.sh | 17 + vendor/go.uber.org/zap/config.go | 330 + vendor/go.uber.org/zap/doc.go | 117 + vendor/go.uber.org/zap/encoder.go | 79 + vendor/go.uber.org/zap/error.go | 82 + vendor/go.uber.org/zap/field.go | 613 + vendor/go.uber.org/zap/flag.go | 39 + vendor/go.uber.org/zap/glide.yaml | 34 + vendor/go.uber.org/zap/global.go | 169 + vendor/go.uber.org/zap/http_handler.go | 140 + .../zap/internal/bufferpool/bufferpool.go | 31 + .../go.uber.org/zap/internal/color/color.go | 44 + vendor/go.uber.org/zap/internal/exit/exit.go | 66 + .../go.uber.org/zap/internal/level_enabler.go | 37 + vendor/go.uber.org/zap/internal/pool/pool.go | 58 + .../zap/internal/stacktrace/stack.go | 181 + vendor/go.uber.org/zap/level.go | 153 + vendor/go.uber.org/zap/logger.go | 432 + vendor/go.uber.org/zap/options.go | 167 + vendor/go.uber.org/zap/sink.go | 180 + vendor/go.uber.org/zap/sugar.go | 437 + vendor/go.uber.org/zap/time.go | 27 + vendor/go.uber.org/zap/writer.go | 98 + .../zap/zapcore/buffered_write_syncer.go | 219 + vendor/go.uber.org/zap/zapcore/clock.go | 48 + .../zap/zapcore/console_encoder.go | 157 + vendor/go.uber.org/zap/zapcore/core.go | 122 + vendor/go.uber.org/zap/zapcore/doc.go | 24 + vendor/go.uber.org/zap/zapcore/encoder.go | 451 + vendor/go.uber.org/zap/zapcore/entry.go | 298 + vendor/go.uber.org/zap/zapcore/error.go | 136 + vendor/go.uber.org/zap/zapcore/field.go | 233 + vendor/go.uber.org/zap/zapcore/hook.go | 77 + .../go.uber.org/zap/zapcore/increase_level.go | 75 + .../go.uber.org/zap/zapcore/json_encoder.go | 583 + vendor/go.uber.org/zap/zapcore/lazy_with.go | 54 + vendor/go.uber.org/zap/zapcore/level.go | 229 + .../go.uber.org/zap/zapcore/level_strings.go | 46 + vendor/go.uber.org/zap/zapcore/marshaler.go | 61 + .../go.uber.org/zap/zapcore/memory_encoder.go | 179 + .../zap/zapcore/reflected_encoder.go | 41 + vendor/go.uber.org/zap/zapcore/sampler.go | 229 + vendor/go.uber.org/zap/zapcore/tee.go | 96 + .../go.uber.org/zap/zapcore/write_syncer.go | 122 + vendor/go.uber.org/zap/zapgrpc/zapgrpc.go | 245 + .../grpc/internal/transport/controlbuf.go | 5 +- .../grpc/internal/transport/http2_client.go | 8 +- .../grpc/internal/transport/http2_server.go | 23 +- .../grpc/resolver/manual/manual.go | 126 + vendor/google.golang.org/grpc/version.go | 2 +- .../clientset/fake/clientset_generated.go | 92 + .../client/clientset/clientset/fake/doc.go | 20 + .../clientset/clientset/fake/register.go | 58 + .../typed/apiextensions/v1/fake/doc.go | 20 + .../v1/fake/fake_apiextensions_client.go | 40 + .../v1/fake/fake_customresourcedefinition.go | 178 + .../typed/apiextensions/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_apiextensions_client.go | 40 + .../fake/fake_customresourcedefinition.go | 178 + vendor/modules.txt | 165 +- vendor/sigs.k8s.io/controller-runtime/LICENSE | 201 + 
.../pkg/client/apiutil/apimachinery.go | 246 + .../pkg/client/apiutil/errors.go | 54 + .../pkg/client/apiutil/restmapper.go | 294 + 636 files changed, 149444 insertions(+), 21 deletions(-) create mode 100644 vendor/github.com/armon/go-metrics/.gitignore create mode 100644 vendor/github.com/armon/go-metrics/.travis.yml create mode 100644 vendor/github.com/armon/go-metrics/LICENSE create mode 100644 vendor/github.com/armon/go-metrics/README.md create mode 100644 vendor/github.com/armon/go-metrics/const_unix.go create mode 100644 vendor/github.com/armon/go-metrics/const_windows.go create mode 100644 vendor/github.com/armon/go-metrics/inmem.go create mode 100644 vendor/github.com/armon/go-metrics/inmem_endpoint.go create mode 100644 vendor/github.com/armon/go-metrics/inmem_signal.go create mode 100644 vendor/github.com/armon/go-metrics/metrics.go create mode 100644 vendor/github.com/armon/go-metrics/sink.go create mode 100644 vendor/github.com/armon/go-metrics/start.go create mode 100644 vendor/github.com/armon/go-metrics/statsd.go create mode 100644 vendor/github.com/armon/go-metrics/statsite.go create mode 100644 vendor/github.com/cilium/cilium/daemon/k8s/init.go create mode 100644 vendor/github.com/cilium/cilium/daemon/k8s/resources.go create mode 100644 vendor/github.com/cilium/cilium/pkg/allocator/allocator.go create mode 100644 vendor/github.com/cilium/cilium/pkg/allocator/cache.go create mode 100644 vendor/github.com/cilium/cilium/pkg/allocator/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/allocator/localkeys.go create mode 100644 vendor/github.com/cilium/cilium/pkg/allocator/logfields.go create mode 100644 vendor/github.com/cilium/cilium/pkg/annotation/k8s.go create mode 100644 vendor/github.com/cilium/cilium/pkg/backoff/backoff.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bgpv1/agent/annotations.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bgpv1/agent/controller.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bgpv1/agent/mock.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bgpv1/agent/routermgr.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bgpv1/agent/signaler/signaler.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bgpv1/api/get_peer.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bgpv1/api/get_route_policies.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bgpv1/api/get_routes.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/bpf.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/bpf_linux.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/bpffs_linux.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/bpffs_migrate.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/bpfmap.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/collection.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/endpoint.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/events.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/link.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/map.go create mode 100644 
vendor/github.com/cilium/cilium/pkg/bpf/map_linux.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/map_register_linux.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/metrics.go create mode 100644 vendor/github.com/cilium/cilium/pkg/bpf/stats_linux.go create mode 100644 vendor/github.com/cilium/cilium/pkg/byteorder/byteorder.go create mode 100644 vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_bigendian.go create mode 100644 vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_littleendian.go create mode 100644 vendor/github.com/cilium/cilium/pkg/byteorder/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/common/const.go create mode 100644 vendor/github.com/cilium/cilium/pkg/common/utils.go create mode 100644 vendor/github.com/cilium/cilium/pkg/container/ring_buffer.go create mode 100644 vendor/github.com/cilium/cilium/pkg/controller/cell.go create mode 100644 vendor/github.com/cilium/cilium/pkg/controller/controller.go create mode 100644 vendor/github.com/cilium/cilium/pkg/controller/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/controller/logfields.go create mode 100644 vendor/github.com/cilium/cilium/pkg/controller/manager.go create mode 100644 vendor/github.com/cilium/cilium/pkg/counter/counter.go create mode 100644 vendor/github.com/cilium/cilium/pkg/counter/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/counter/integer.go create mode 100644 vendor/github.com/cilium/cilium/pkg/counter/prefixes.go create mode 100644 vendor/github.com/cilium/cilium/pkg/crypto/certificatemanager/certificate_manager.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/bandwidth.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/cell.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/types.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/config/defines/defines.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/loader/metrics/metrics.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/tunnel/cell.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/tunnel/tunnel.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/types/config.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/types/datapath.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/types/endpoint.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/types/ipsec.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/types/lbmap.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/types/loader.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/types/node.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/types/node_addressing.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/types/wireguard.go create mode 100644 vendor/github.com/cilium/cilium/pkg/debug/subsystem.go create mode 100644 vendor/github.com/cilium/cilium/pkg/ebpf/doc.go create mode 100644 
vendor/github.com/cilium/cilium/pkg/ebpf/ebpf.go create mode 100644 vendor/github.com/cilium/cilium/pkg/ebpf/map.go create mode 100644 vendor/github.com/cilium/cilium/pkg/ebpf/map_register.go create mode 100644 vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/owner.go create mode 100644 vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/regeneration_context.go create mode 100644 vendor/github.com/cilium/cilium/pkg/eventqueue/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/eventqueue/eventqueue.go create mode 100644 vendor/github.com/cilium/cilium/pkg/fqdn/restore/restore.go create mode 100644 vendor/github.com/cilium/cilium/pkg/hive/job/job.go create mode 100644 vendor/github.com/cilium/cilium/pkg/hive/job/metrics.go create mode 100644 vendor/github.com/cilium/cilium/pkg/identity/cache/allocator.go create mode 100644 vendor/github.com/cilium/cilium/pkg/identity/cache/cache.go create mode 100644 vendor/github.com/cilium/cilium/pkg/identity/cache/local.go create mode 100644 vendor/github.com/cilium/cilium/pkg/identity/identitymanager/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/identity/identitymanager/log.go create mode 100644 vendor/github.com/cilium/cilium/pkg/identity/identitymanager/manager.go create mode 100644 vendor/github.com/cilium/cilium/pkg/identity/identitymanager/observer.go create mode 100644 vendor/github.com/cilium/cilium/pkg/identity/key/global_identity.go create mode 100644 vendor/github.com/cilium/cilium/pkg/identity/model/identity.go create mode 100644 vendor/github.com/cilium/cilium/pkg/idpool/idpool.go create mode 100644 vendor/github.com/cilium/cilium/pkg/ipcache/types/entries.go create mode 100644 vendor/github.com/cilium/cilium/pkg/ipcache/types/types.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/annotate.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/cache_status.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/cilium_node.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/cell.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/clientset_generated.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/register.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_cilium.io_client.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwideenvoyconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwidenetworkpolicy.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumegressgatewaypolicy.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumendpoint.go create mode 100644 
vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumenvoyconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumexternalworkload.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumidentity.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumlocalredirectpolicy.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnetworkpolicy.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnode.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_cilium.io_client.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpadvertisement.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpclusterconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpnodeconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpnodeconfigoverride.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeerconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeeringpolicy.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumcidrgroup.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumendpointslice.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliuml2announcementpolicy.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumloadbalancerippool.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumnodeconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumpodippool.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/config.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/getters.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/constants/const.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/endpoints.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/error_helpers.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/factory_functions.go 
create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/identitybackend/identity.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/informer/informer.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/json_patch.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/labels.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/logfields.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/metrics/metrics.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/network_policy.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/node.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/resource/error.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/resource/event.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/resource/key.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/resource/resource.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/resource/scheme.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/resource/store.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/resource_ctors.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/rule_translate.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/service.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/service_cache.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.pb.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.proto create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/register.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/types.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/well_known_labels.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepequal.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.pb.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.proto create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/register.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/types.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/well_known_labels.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepequal.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.pb.go create mode 100644 
vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.proto create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/register.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/types.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/well_known_annotations.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepequal.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/register.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/apiextensions_client.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/customresourcedefinition.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/generated_expansion.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-clientset/clientset.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.pb.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.proto create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/register.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/types.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.pb.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.proto create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/register.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/types.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.pb.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/intstr.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/zz_generated.deepequal.go create mode 
100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/clientset.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/clientset_generated.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/register.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/register.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/core_client.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/endpoints.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_core_client.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_endpoints.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_namespace.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_node.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_pod.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_secret.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_service.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/generated_expansion.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/namespace.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/node.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/pod.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/secret.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/service.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/discovery_client.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/endpointslice.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_discovery_client.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_endpointslice.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/generated_expansion.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/discovery_client.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/endpointslice.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_discovery_client.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_endpointslice.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/generated_expansion.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networking_client.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networkpolicy.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/generated_expansion.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networking_client.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networkpolicy.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/types/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/types/types.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/types/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/types/zz_generated.deepequal.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/version/version.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/watchers/resources/resources.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/zz_generated.deepequal.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/allocator/allocator.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/allocator/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/allocator/logfields.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/backend.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/backwards_compat.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/cell.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/client.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/config.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/consul.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/dummy.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/etcd.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/etcd_lease.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/events.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/kvstore.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/lock.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/logfields.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/metrics.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/store/cell.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/store/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/store/metrics.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/store/store.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/store/syncstore.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstore.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstoremgr.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/trace.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/kvstore/watcher_cache.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/maps/bwmap/bwmap.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/maps/bwmap/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/maps/lxcmap/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/maps/lxcmap/lxcmap.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/mountinfo/mountinfo.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/mountinfo/mountinfo_linux.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/mountinfo/mountinfo_unspecified.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/node/address.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/node/address_linux.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/node/address_other.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/node/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/node/host_endpoint.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/node/ip.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/node/ip_linux.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/node/local_node_store.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/node/logfields.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/node/types/logfields.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/node/types/node.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/node/types/nodename.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/node/types/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/cidr.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/config.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/distillery.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/identifier.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/l4.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/mapstate.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/policy.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/proxyid.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/repository.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/resolve.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/rule.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/rules.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/selectorcache_selector.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/trafficdirection.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/trigger.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/utils.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/policy/visibility.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/proxy/accesslog/record.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/rand/safe_rand.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/rate/api_limiter.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/rate/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/rate/limiter.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/rate/metrics/metrics.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/safeio/safeio.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/service/store/logfields.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/service/store/store.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/service/store/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/service/store/zz_generated.deepequal.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/sysctl/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/sysctl/sysctl.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/trigger/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/trigger/trigger.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/types/ipv4.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/types/ipv6.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/types/macaddr.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/types/portmap.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/u8proto/u8proto.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/wireguard/types/types.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/accesslog.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.validate.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.validate.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.validate.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.validate.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.validate.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.validate.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.validate.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.validate.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.go
 create mode 100644 vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.validate.go
 create mode 100644 vendor/github.com/coreos/go-semver/LICENSE
 create mode 100644 vendor/github.com/coreos/go-semver/NOTICE
 create mode 100644 vendor/github.com/coreos/go-semver/semver/semver.go
 create mode 100644 vendor/github.com/coreos/go-semver/semver/sort.go
 create mode 100644 vendor/github.com/coreos/go-systemd/v22/LICENSE
 create mode 100644 vendor/github.com/coreos/go-systemd/v22/NOTICE
 create mode 100644 vendor/github.com/coreos/go-systemd/v22/journal/journal.go
 create mode 100644 vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
 create mode 100644 vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
 create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile
 create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go
 create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
 create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden
 create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto
 create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go
 create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile
 create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go
 create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
 create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
 create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/.copywrite.hcl
 create mode 100644 vendor/github.com/hashicorp/consul/api/LICENSE
 create mode 100644 vendor/github.com/hashicorp/consul/api/README.md
 create mode 100644 vendor/github.com/hashicorp/consul/api/acl.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/agent.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/api.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/catalog.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_exports.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_gateways.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_inline_certificate.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_intentions.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_mesh.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_routes.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_sameness_group.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_status.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/connect.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/connect_ca.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/connect_intention.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/coordinate.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/debug.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/discovery_chain.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/event.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/health.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/internal.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/kv.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/lock.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/namespace.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/operator.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/operator_area.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/operator_audit.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/operator_autopilot.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/operator_keyring.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/operator_license.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/operator_raft.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/operator_segment.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/operator_usage.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/partition.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/peering.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/prepared_query.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/raw.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/semaphore.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/session.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/snapshot.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/status.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/txn.go
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/doc.go
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/handlers.go
 create mode 100644 vendor/github.com/hashicorp/go-hclog/.gitignore
 create mode 100644 vendor/github.com/hashicorp/go-hclog/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-hclog/README.md
 create mode 100644 vendor/github.com/hashicorp/go-hclog/colorize_unix.go
 create mode 100644 vendor/github.com/hashicorp/go-hclog/colorize_windows.go
 create mode 100644 vendor/github.com/hashicorp/go-hclog/context.go
 create mode 100644 vendor/github.com/hashicorp/go-hclog/exclude.go
 create mode 100644 vendor/github.com/hashicorp/go-hclog/global.go
 create mode 100644 vendor/github.com/hashicorp/go-hclog/interceptlogger.go
 create mode 100644 vendor/github.com/hashicorp/go-hclog/intlogger.go
 create mode 100644 vendor/github.com/hashicorp/go-hclog/logger.go
 create mode 100644 vendor/github.com/hashicorp/go-hclog/nulllogger.go
 create mode 100644 vendor/github.com/hashicorp/go-hclog/stacktrace.go
 create mode 100644 vendor/github.com/hashicorp/go-hclog/stdlog.go
 create mode 100644 vendor/github.com/hashicorp/go-hclog/writer.go
 create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/.gitignore
 create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
 create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/README.md
 create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/edges.go
 create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/iradix.go
 create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/iter.go
 create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/node.go
 create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
 create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/Makefile
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/README.md
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/doc.go
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
 create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
 create mode 100644 vendor/github.com/hashicorp/golang-lru/LICENSE
 create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
 create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
 create mode 100644 vendor/github.com/hashicorp/serf/LICENSE
 create mode 100644 vendor/github.com/hashicorp/serf/coordinate/client.go
 create mode 100644 vendor/github.com/hashicorp/serf/coordinate/config.go
 create mode 100644 vendor/github.com/hashicorp/serf/coordinate/coordinate.go
 create mode 100644 vendor/github.com/hashicorp/serf/coordinate/phantom.go
 create mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE
 create mode 100644 vendor/github.com/mitchellh/go-homedir/README.md
 create mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/LICENSE
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/doc.go
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/md.go
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go
 create mode 100644 vendor/go.etcd.io/etcd/api/v3/version/version.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/LICENSE
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/logutil/doc.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_level.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/systemd/doc.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/systemd/journal.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/doc.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/types/doc.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/types/slice.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go
 create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/types/urlsmap.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/LICENSE
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/README.md
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/auth.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/client.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/cluster.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/compact_op.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/compare.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/concurrency/doc.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/concurrency/election.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/concurrency/key.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/concurrency/mutex.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/concurrency/session.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/concurrency/stm.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/config.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/ctx.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/doc.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/kv.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/lease.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/logger.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/maintenance.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/op.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/options.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/retry.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/sort.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/txn.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/utils.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/watch.go
 create mode 100644 vendor/go.etcd.io/etcd/client/v3/yaml/config.go
 create mode 100644 vendor/go.uber.org/zap/.codecov.yml
 create mode 100644 vendor/go.uber.org/zap/.gitignore
 create mode 100644 vendor/go.uber.org/zap/.golangci.yml
 create mode 100644 vendor/go.uber.org/zap/.readme.tmpl
 create mode 100644 vendor/go.uber.org/zap/CHANGELOG.md
 create mode 100644 vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
 create mode 100644 vendor/go.uber.org/zap/CONTRIBUTING.md
 create mode 100644 vendor/go.uber.org/zap/FAQ.md
 create mode 100644 vendor/go.uber.org/zap/LICENSE.txt
 create mode 100644 vendor/go.uber.org/zap/Makefile
 create mode 100644 vendor/go.uber.org/zap/README.md
 create mode 100644 vendor/go.uber.org/zap/array.go
 create mode 100644 vendor/go.uber.org/zap/buffer/buffer.go
 create mode 100644 vendor/go.uber.org/zap/buffer/pool.go
 create mode 100644 vendor/go.uber.org/zap/checklicense.sh
 create mode 100644 vendor/go.uber.org/zap/config.go
 create mode 100644 vendor/go.uber.org/zap/doc.go
 create mode 100644 vendor/go.uber.org/zap/encoder.go
 create mode 100644 vendor/go.uber.org/zap/error.go
 create mode 100644 vendor/go.uber.org/zap/field.go
 create mode 100644 vendor/go.uber.org/zap/flag.go
 create mode 100644 vendor/go.uber.org/zap/glide.yaml
 create mode 100644 vendor/go.uber.org/zap/global.go
 create mode 100644 vendor/go.uber.org/zap/http_handler.go
 create mode 100644 vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
 create mode 100644 vendor/go.uber.org/zap/internal/color/color.go
 create mode 100644 vendor/go.uber.org/zap/internal/exit/exit.go
 create mode 100644 vendor/go.uber.org/zap/internal/level_enabler.go
 create mode 100644 vendor/go.uber.org/zap/internal/pool/pool.go
 create mode 100644 vendor/go.uber.org/zap/internal/stacktrace/stack.go
 create mode 100644 vendor/go.uber.org/zap/level.go
 create mode 100644 vendor/go.uber.org/zap/logger.go
 create mode 100644 vendor/go.uber.org/zap/options.go
 create mode 100644 vendor/go.uber.org/zap/sink.go
 create mode 100644 vendor/go.uber.org/zap/sugar.go
 create mode 100644 vendor/go.uber.org/zap/time.go
 create mode 100644 vendor/go.uber.org/zap/writer.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/clock.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/console_encoder.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/core.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/doc.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/encoder.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/entry.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/error.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/field.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/hook.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/increase_level.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/json_encoder.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/lazy_with.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/level.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/level_strings.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/marshaler.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/memory_encoder.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/reflected_encoder.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/sampler.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/tee.go
 create mode 100644 vendor/go.uber.org/zap/zapcore/write_syncer.go
 create mode 100644 vendor/go.uber.org/zap/zapgrpc/zapgrpc.go
 create mode 100644 vendor/google.golang.org/grpc/resolver/manual/manual.go
 create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go
 create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/doc.go
 create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/register.go
 create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/doc.go
 create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/fake_apiextensions_client.go
 create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/fake_customresourcedefinition.go
 create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/doc.go
 create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_apiextensions_client.go
 create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_customresourcedefinition.go
 create mode 100644 vendor/sigs.k8s.io/controller-runtime/LICENSE
 create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go
 create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/errors.go
 create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go
diff --git a/go.mod b/go.mod
index 23945d8c2f..26f690c304 100644
--- a/go.mod
+++ b/go.mod
@@ -14,7 +14,7 @@ replace (
 require (
 	github.com/blang/semver/v4 v4.0.0
 	github.com/cilium/charts v0.0.0-20240131194518-c3dab910c790
-	github.com/cilium/cilium v1.15.0
+	github.com/cilium/cilium v1.15.1
 	github.com/cilium/hubble v0.13.0
 	github.com/cilium/tetragon/pkg/k8s v0.0.0-20231127174521-c97da4b42413
 	github.com/cilium/workerpool v1.2.0
@@ -29,7 +29,7 @@ require (
 	github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace
 	golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb
 	golang.org/x/mod v0.15.0
-	google.golang.org/grpc v1.61.0
+	google.golang.org/grpc v1.61.1
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
 	helm.sh/helm/v3 v3.14.0
 	k8s.io/api v0.29.0
@@ -42,30 +42,46 @@ require (
 	github.com/Microsoft/hcsshim v0.11.4 // indirect
+	github.com/armon/go-metrics v0.4.1 // indirect
 	github.com/cilium/ebpf v0.12.3 // indirect
 	github.com/cilium/proxy v0.0.0-20231031145409-f19708f3d018 // indirect
 	github.com/containerd/log v0.1.0 // indirect
+	github.com/coreos/go-semver v0.3.1 // indirect
+	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/felixge/httpsnoop v1.0.3 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/gopacket v1.1.19 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/hashicorp/consul/api v1.26.1 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+	github.com/hashicorp/go-hclog v1.5.0 // indirect
+	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
+	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
+	github.com/hashicorp/golang-lru v0.5.4 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
+	github.com/hashicorp/serf v0.10.1 // indirect
+	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
 	github.com/sagikazarmark/locafero v0.4.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
+	go.etcd.io/etcd/api/v3 v3.5.11 // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.11 // indirect
+	go.etcd.io/etcd/client/v3 v3.5.11 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
 	go.opentelemetry.io/otel/metric v1.21.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
 	go.uber.org/dig v1.17.1 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
+	go.uber.org/zap v1.26.0 // indirect
 	go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect
 	google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
+	sigs.k8s.io/controller-runtime v0.16.3 // indirect
 )
 
 require (
diff --git a/go.sum b/go.sum
index 42381b8180..24c3c2093a 100644
--- a/go.sum
+++ b/go.sum
@@ -8,6 +8,7 @@ github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
 github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
 github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
 github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
@@ -26,9 +27,17 @@ github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYx
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/andybalholm/brotli v1.0.1 h1:KqhlKozYbRtJvsPrrEeXcO+N2l6NYT5A2QAFmSULpEc=
 github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
+github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
@@ -38,6 +47,7 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
@@ -51,6 +61,7 @@ github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3k
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
@@ -62,8 +73,8 @@ github.com/cilium/charts v0.0.0-20240131194518-c3dab910c790 h1:frj4pesreRJlFaHgS
 github.com/cilium/charts v0.0.0-20240131194518-c3dab910c790/go.mod h1:M3C9VOlFvRzuV+a01t07Tw4uFLSfkCH3L542IWjf6BU=
 github.com/cilium/checkmate v1.0.3 h1:CQC5eOmlAZeEjPrVZY3ZwEBH64lHlx9mXYdUehEwI5w=
 github.com/cilium/checkmate v1.0.3/go.mod h1:KiBTasf39/F2hf2yAmHw21YFl3hcEyP4Yk6filxc12A=
-github.com/cilium/cilium v1.15.0 h1:2cebfYsOMvmIoDWghCkIr9CAsTkjdTlXYBH8im10uTs=
-github.com/cilium/cilium v1.15.0/go.mod h1:MJ6X0Qo3Hem2CP+yCcVI5EDcvh4yT6+2cq55A6AKpnA=
+github.com/cilium/cilium v1.15.1 h1:RnNgjVFmu7MgSs3rJXo/8Js/2Fv7q3sJ+DLQcudSJc8=
+github.com/cilium/cilium v1.15.1/go.mod h1:MJ6X0Qo3Hem2CP+yCcVI5EDcvh4yT6+2cq55A6AKpnA=
 github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4=
 github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM=
 github.com/cilium/hubble v0.13.0 h1:oAMUa3c6Rvm6+BBckTzc76o98RJkjApkjg1A7eNLXzM=
@@ -74,6 +85,8 @@ github.com/cilium/tetragon/pkg/k8s v0.0.0-20231127174521-c97da4b42413 h1:ANbBZd5
 github.com/cilium/tetragon/pkg/k8s v0.0.0-20231127174521-c97da4b42413/go.mod h1:phk10izbnP8+eLw/fnMZTaPyxNcZIlyarWBR+ncQ2cQ=
 github.com/cilium/workerpool v1.2.0 h1:Wc2iOPTvCgWKQXeq4L5tnx4QFEI+z5q1+bSpSS0cnAY=
 github.com/cilium/workerpool v1.2.0/go.mod h1:GOYJhwlnIjR+jWSDNBb5kw47G1H/XA9X4WOBpgr4pQU=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cloudflare/cfssl v1.6.4 h1:NMOvfrEjFfC63K3SGXgAnFdsgkmiq4kATme5BfcqrO8=
 github.com/cloudflare/cfssl v1.6.4/go.mod h1:8b3CQMxfWPAeom3zBnGJ6sd+G1NkL5TXqmDXacb+1J0=
@@ -87,6 +100,12 @@ github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG
 github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
+github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
+github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -131,8 +150,13 @@ github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBF
 github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
 github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
 github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
+github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
 github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
 github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
 github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
 github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
@@ -148,6 +172,7 @@ github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3Bop
 github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
 github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -155,6 +180,8 @@ github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
 github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
+github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA=
 github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
 github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
@@ -203,6 +230,7 @@ github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XE
 github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -229,6 +257,7 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=
 github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
 github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/certificate-transparency-go v1.1.4 h1:hCyXHDbtqlr/lMXU0D4WgbalXL0Zk4dSWWMbPV8VrqY=
@@ -256,6 +285,8 @@ github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark=
 github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg=
+github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -272,17 +303,56 @@ github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=
 github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/hashicorp/consul/api v1.26.1 h1:5oSXOO5fboPZeW5SN+TdGFP/BILDgBm19OrPZ/pICIM=
+github.com/hashicorp/consul/api v1.26.1/go.mod h1:B4sQTeaSO16NtynqrAdwOlahJ7IUDZM9cj2420xYL8A=
+github.com/hashicorp/consul/sdk v0.15.0 h1:2qK9nDrr4tiJKRoxPGhm6B7xJjLVIQqkjiab2M4aKjU=
+github.com/hashicorp/consul/sdk v0.15.0/go.mod h1:r/OmRRPbHOe0yxNahLw7G9x5WG17E1BIECMtCjcPSNo=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo=
+github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
+github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=
+github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
 github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
 github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
+github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
+github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
+github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
+github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
 github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
 github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
 github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
@@ -297,6 +367,7 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@@ -347,26 +418,43 @@ github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY
 github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
 github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
 github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
 github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
 github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
 github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
 github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo=
 github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4=
-github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg=
-github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
 github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -419,6 +507,9 @@ github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:Ff
 github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU=
 github.com/osrg/gobgp/v3 v3.23.0 h1:2QTiSAiEuHXOqELC8Y4hBvxdqedIZfcwuUndOw4vilE=
 github.com/osrg/gobgp/v3 v3.23.0/go.mod h1:4fbscYpsCk14EO16nTWAdJyErO4MbAZ2zLJmsmeXu/k=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
 github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
 github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
@@ -434,11 +525,14 @@ github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmd
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
 github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
 github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig=
 github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
@@ -447,20 +541,24 @@ github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjz
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
 github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
 github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
 github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
 github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
 github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
@@ -472,12 +570,15 @@ github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzF
 github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
 github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
 github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
 github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
 github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0=
 github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
 github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
 github.com/shirou/gopsutil/v3 v3.23.9 h1:ZI5bWVeu2ep4/DIxB4U9okeYJ7zp/QLTO4auRb/ty/E=
@@ -489,6 +590,7 @@ github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5g
 github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
@@ -518,17 +620,27 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM=
+github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
+github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
+github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
 github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
 github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
 github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
 github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
 github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
@@ -576,6 +688,12 @@ github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc h1:zkGwegkOW709y0oiAr
 github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc/go.mod h1:FM4U1E3NzlNMRnSUTU3P1UdukWhYGifqEsjk9fn7BCk=
 github.com/zmap/zlint/v3 v3.1.0 h1:WjVytZo79m/L1+/Mlphl09WBob6YTGljN5IGWZFpAv0=
 github.com/zmap/zlint/v3 v3.1.0/go.mod h1:L7t8s3sEKkb0A2BxGy1IWrxt1ZATa1R4QfJZaQOD3zU=
+go.etcd.io/etcd/api/v3 v3.5.11 h1:B54KwXbWDHyD3XYAwprxNzTe7vlhR69LuBgZnMVvS7E=
+go.etcd.io/etcd/api/v3 v3.5.11/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4=
+go.etcd.io/etcd/client/pkg/v3
v3.5.11 h1:bT2xVspdiCj2910T0V+/KHcVKjkUrCZVtk8J2JF2z1A= +go.etcd.io/etcd/client/pkg/v3 v3.5.11/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= +go.etcd.io/etcd/client/v3 v3.5.11 h1:ajWtgoNSZJ1gmS8k+icvPtqsqEav+iUorF7b0qozgUU= +go.etcd.io/etcd/client/v3 v3.5.11/go.mod h1:a6xQUEqFJ8vztO1agJh/KQKOMfFI8og52ZconzcDJwE= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= @@ -601,10 +719,13 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= @@ -634,11 +755,13 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= @@ -657,25 +780,39 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -694,6 +831,7 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -710,6 +848,7 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -738,8 +877,8 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -755,6 +894,7 @@ google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -764,6 +904,8 @@ gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -803,6 +945,8 @@ k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSn k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= +sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= +sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= diff --git a/vendor/github.com/armon/go-metrics/.gitignore b/vendor/github.com/armon/go-metrics/.gitignore new file mode 100644 index 0000000000..e5750f5720 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +/metrics.out + +.idea diff --git a/vendor/github.com/armon/go-metrics/.travis.yml b/vendor/github.com/armon/go-metrics/.travis.yml new file mode 100644 index 0000000000..87d230c8d7 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - "1.x" + +env: + - GO111MODULE=on + +install: + - go get ./... + +script: + - go test ./... 
diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE new file mode 100644 index 0000000000..106569e542 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-metrics/README.md b/vendor/github.com/armon/go-metrics/README.md new file mode 100644 index 0000000000..aa73348c08 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/README.md @@ -0,0 +1,91 @@ +go-metrics +========== + +This library provides a `metrics` package which can be used to instrument code, +expose application metrics, and profile runtime performance in a flexible manner. + +Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics) + +Sinks +----- + +The `metrics` package makes use of a `MetricSink` interface to support delivery +to any type of backend. Currently the following sinks are provided: + +* StatsiteSink: Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP) +* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP) +* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes) +* InmemSink: Provides in-memory aggregation, can be used to export stats +* FanoutSink: Sinks to multiple sinks. Enables writing to multiple statsite instances, for example. +* BlackholeSink: Sinks to nowhere + +In addition to the sinks, the `InmemSignal` can be used to catch a signal, +and dump a formatted output of recent metrics. For example, when a process gets +a SIGUSR1, it can dump to stderr recent performance metrics for debugging. + +Labels +------ + +Most metrics have an equivalent variant ending in `WithLabels`; these methods +allow pushing metrics with labels and using features of the underlying sinks +(e.g., translation into Prometheus labels). + +Since some of these labels may greatly increase the cardinality of metrics, the +library allows filtering labels using a blacklist/whitelist system +which is global to all metrics. + +* If `Config.AllowedLabels` is not nil, then only labels specified in this value will be sent to the underlying sink; otherwise, all labels are sent by default.
+* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to the underlying sinks. + +By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that +no labels are filtered at all, but this allows a user to globally block some labels with high +cardinality at the application level. + +Examples +-------- + +Here is an example of using the package: + +```go +func SlowMethod() { + // Profiling the runtime of a method + defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now()) +} + +// Configure a statsite sink as the global metrics sink +sink, _ := metrics.NewStatsiteSink("statsite:8125") +metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) + +// Emit a Key/Value pair +metrics.EmitKey([]string{"questions", "meaning of life"}, 42) +``` + +Here is an example of setting up a signal handler: + +```go +// Setup the inmem sink and signal handler +inm := metrics.NewInmemSink(10*time.Second, time.Minute) +sig := metrics.DefaultInmemSignal(inm) +metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm) + +// Run some code +inm.SetGauge([]string{"foo"}, 42) +inm.EmitKey([]string{"bar"}, 30) + +inm.IncrCounter([]string{"baz"}, 42) +inm.IncrCounter([]string{"baz"}, 1) +inm.IncrCounter([]string{"baz"}, 80) + +inm.AddSample([]string{"method", "wow"}, 42) +inm.AddSample([]string{"method", "wow"}, 100) +inm.AddSample([]string{"method", "wow"}, 22) + +.... +``` + +When a signal comes in, output like the following will be dumped to stderr: + + [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 + [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 + [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 + [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 \ No newline at end of file diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go new file mode 100644 index 0000000000..31098dd57e --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + DefaultSignal = syscall.SIGUSR1 +) diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go new file mode 100644 index 0000000000..38136af3e4 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + // Windows has no SIGUSR1, use SIGBREAK + DefaultSignal = syscall.Signal(21) +) diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go new file mode 100644 index 0000000000..7c427aca97 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem.go @@ -0,0 +1,339 @@ +package metrics + +import ( + "bytes" + "fmt" + "math" + "net/url" + "strings" + "sync" + "time" +) + +var spaceReplacer = strings.NewReplacer(" ", "_") + +// InmemSink provides a MetricSink that does in-memory aggregation +// without sending metrics over a network. It can be embedded within +// an application to provide profiling information.
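+// +// A minimal sketch of standalone use (illustrative; the metric name is +// hypothetical, not part of the upstream docs): +// +//	sink := metrics.NewInmemSink(10*time.Second, time.Minute) +//	sink.IncrCounter([]string{"requests"}, 1) +//	for _, intv := range sink.Data() { +//		fmt.Println(intv.Interval, intv.Counters) +//	}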
+type InmemSink struct { + // How long is each aggregation interval + interval time.Duration + + // Retain controls how many metric intervals we keep + retain time.Duration + + // maxIntervals is the maximum length of intervals. + // It is retain / interval. + maxIntervals int + + // intervals is a slice of the retained intervals + intervals []*IntervalMetrics + intervalLock sync.RWMutex + + rateDenom float64 +} + +// IntervalMetrics stores the aggregated metrics +// for a specific interval +type IntervalMetrics struct { + sync.RWMutex + + // The start time of the interval + Interval time.Time + + // Gauges maps the key to the last set value + Gauges map[string]GaugeValue + + // Points maps the string to the list of emitted values + // from EmitKey + Points map[string][]float32 + + // Counters maps the string key to a sum of the counter + // values + Counters map[string]SampledValue + + // Samples maps the key to an AggregateSample, + // which has the rolled up view of a sample + Samples map[string]SampledValue + + // done is closed when this interval has ended, and a new IntervalMetrics + // has been created to receive any future metrics. + done chan struct{} +} + +// NewIntervalMetrics creates a new IntervalMetrics for a given interval +func NewIntervalMetrics(intv time.Time) *IntervalMetrics { + return &IntervalMetrics{ + Interval: intv, + Gauges: make(map[string]GaugeValue), + Points: make(map[string][]float32), + Counters: make(map[string]SampledValue), + Samples: make(map[string]SampledValue), + done: make(chan struct{}), + } +} + +// AggregateSample is used to hold aggregate metrics +// about a sample +type AggregateSample struct { + Count int // The count of emitted pairs + Rate float64 // The values rate per time unit (usually 1 second) + Sum float64 // The sum of values + SumSq float64 `json:"-"` // The sum of squared values + Min float64 // Minimum value + Max float64 // Maximum value + LastUpdated time.Time `json:"-"` // When value was last updated +} + +// Stddev computes the standard deviation of the values +func (a *AggregateSample) Stddev() float64 { + num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) + div := float64(a.Count * (a.Count - 1)) + if div == 0 { + return 0 + } + return math.Sqrt(num / div) +} + +// Mean computes the mean of the values +func (a *AggregateSample) Mean() float64 { + if a.Count == 0 { + return 0 + } + return a.Sum / float64(a.Count) +} + +// Ingest is used to update a sample +func (a *AggregateSample) Ingest(v float64, rateDenom float64) { + a.Count++ + a.Sum += v + a.SumSq += (v * v) + if v < a.Min || a.Count == 1 { + a.Min = v + } + if v > a.Max || a.Count == 1 { + a.Max = v + } + a.Rate = float64(a.Sum) / rateDenom + a.LastUpdated = time.Now() +} + +func (a *AggregateSample) String() string { + if a.Count == 0 { + return "Count: 0" + } else if a.Stddev() == 0 { + return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated) + } else { + return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s", + a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated) + } +} + +// NewInmemSinkFromURL creates an InmemSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL.
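+// +// A brief sketch of the URL shape it accepts (illustrative; the host part is +// not used here, and "interval" and "retain" are the query parameters parsed +// below): +// +//	u, _ := url.Parse("inmem://?interval=10s&retain=1m") +//	sink, err := NewInmemSinkFromURL(u)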
+func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) { + params := u.Query() + + interval, err := time.ParseDuration(params.Get("interval")) + if err != nil { + return nil, fmt.Errorf("Bad 'interval' param: %s", err) + } + + retain, err := time.ParseDuration(params.Get("retain")) + if err != nil { + return nil, fmt.Errorf("Bad 'retain' param: %s", err) + } + + return NewInmemSink(interval, retain), nil +} + +// NewInmemSink is used to construct a new in-memory sink. +// Uses an aggregation interval and maximum retention period. +func NewInmemSink(interval, retain time.Duration) *InmemSink { + rateTimeUnit := time.Second + i := &InmemSink{ + interval: interval, + retain: retain, + maxIntervals: int(retain / interval), + rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()), + } + i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) + return i +} + +func (i *InmemSink) SetGauge(key []string, val float32) { + i.SetGaugeWithLabels(key, val, nil) +} + +func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels} +} + +func (i *InmemSink) EmitKey(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + vals := intv.Points[k] + intv.Points[k] = append(vals, val) +} + +func (i *InmemSink) IncrCounter(key []string, val float32) { + i.IncrCounterWithLabels(key, val, nil) +} + +func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Counters[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Counters[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +func (i *InmemSink) AddSample(key []string, val float32) { + i.AddSampleWithLabels(key, val, nil) +} + +func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Samples[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Samples[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +// Data is used to retrieve all the aggregated metrics +// Intervals may be in use, and a read lock should be acquired +func (i *InmemSink) Data() []*IntervalMetrics { + // Get the current interval, forces creation + i.getInterval() + + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + intervals := make([]*IntervalMetrics, n) + + copy(intervals[:n-1], i.intervals[:n-1]) + current := i.intervals[n-1] + + // make a separate copy of the current interval + intervals[n-1] = &IntervalMetrics{} + copyCurrent := intervals[n-1] + current.RLock() + *copyCurrent = *current + // RWMutex is not safe to copy, so create a new instance on the copy + copyCurrent.RWMutex = sync.RWMutex{} + + copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges)) + for k, v := range current.Gauges { + copyCurrent.Gauges[k] = v + } + // saved values will not change, so just copy the slice reference + copyCurrent.Points = make(map[string][]float32, len(current.Points)) + for k, v := range current.Points { + copyCurrent.Points[k] = v
+ } + copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters)) + for k, v := range current.Counters { + copyCurrent.Counters[k] = v.deepCopy() + } + copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples)) + for k, v := range current.Samples { + copyCurrent.Samples[k] = v.deepCopy() + } + current.RUnlock() + + return intervals +} + +// getInterval returns the current interval. A new interval is created if no +// previous interval exists, or if the current time is beyond the window for the +// current interval. +func (i *InmemSink) getInterval() *IntervalMetrics { + intv := time.Now().Truncate(i.interval) + + // Attempt to return the existing interval first, because it only requires + // a read lock. + i.intervalLock.RLock() + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + defer i.intervalLock.RUnlock() + return i.intervals[n-1] + } + i.intervalLock.RUnlock() + + i.intervalLock.Lock() + defer i.intervalLock.Unlock() + + // Re-check for an existing interval now that the lock is re-acquired. + n = len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + + current := NewIntervalMetrics(intv) + i.intervals = append(i.intervals, current) + if n > 0 { + close(i.intervals[n-1].done) + } + + n++ + // Prune old intervals if the count exceeds the max. + if n >= i.maxIntervals { + copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) + i.intervals = i.intervals[:i.maxIntervals] + } + return current +} + +// Flattens the key for formatting, removes spaces +func (i *InmemSink) flattenKey(parts []string) string { + buf := &bytes.Buffer{} + + joined := strings.Join(parts, ".") + + spaceReplacer.WriteString(buf, joined) + + return buf.String() +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) { + key := i.flattenKey(parts) + buf := bytes.NewBufferString(key) + + for _, label := range labels { + spaceReplacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value)) + } + + return buf.String(), key +} diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go new file mode 100644 index 0000000000..24eefa9638 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_endpoint.go @@ -0,0 +1,162 @@ +package metrics + +import ( + "context" + "fmt" + "net/http" + "sort" + "time" +) + +// MetricsSummary holds a roll-up of metrics info for a given interval +type MetricsSummary struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +type GaugeValue struct { + Name string + Hash string `json:"-"` + Value float32 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +type PointValue struct { + Name string + Points []float32 +} + +type SampledValue struct { + Name string + Hash string `json:"-"` + *AggregateSample + Mean float64 + Stddev float64 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +// deepCopy allocates a new instance of AggregateSample +func (source *SampledValue) deepCopy() SampledValue { + dest := *source + if source.AggregateSample != nil { + dest.AggregateSample = &AggregateSample{} + *dest.AggregateSample = *source.AggregateSample + } + return dest +} + +// DisplayMetrics returns a summary of the metrics from the most recent finished interval. 
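+// +// The (ResponseWriter, Request) -> (interface{}, error) signature is intended +// to be wrapped by an HTTP handler that encodes the returned summary; a rough +// sketch, not an upstream API: +// +//	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) { +//		summary, err := sink.DisplayMetrics(w, r) +//		if err == nil { +//			json.NewEncoder(w).Encode(summary) +//		} +//	})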
+func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + data := i.Data() + + var interval *IntervalMetrics + n := len(data) + switch { + case n == 0: + return nil, fmt.Errorf("no metric intervals have been initialized yet") + case n == 1: + // Show the current interval if it's all we have + interval = data[0] + default: + // Show the most recent finished interval if we have one + interval = data[n-2] + } + + return newMetricSummaryFromInterval(interval), nil +} + +func newMetricSummaryFromInterval(interval *IntervalMetrics) MetricsSummary { + interval.RLock() + defer interval.RUnlock() + + summary := MetricsSummary{ + Timestamp: interval.Interval.Round(time.Second).UTC().String(), + Gauges: make([]GaugeValue, 0, len(interval.Gauges)), + Points: make([]PointValue, 0, len(interval.Points)), + } + + // Format and sort the output of each metric type, so it gets displayed in a + // deterministic order. + for name, points := range interval.Points { + summary.Points = append(summary.Points, PointValue{name, points}) + } + sort.Slice(summary.Points, func(i, j int) bool { + return summary.Points[i].Name < summary.Points[j].Name + }) + + for hash, value := range interval.Gauges { + value.Hash = hash + value.DisplayLabels = make(map[string]string) + for _, label := range value.Labels { + value.DisplayLabels[label.Name] = label.Value + } + value.Labels = nil + + summary.Gauges = append(summary.Gauges, value) + } + sort.Slice(summary.Gauges, func(i, j int) bool { + return summary.Gauges[i].Hash < summary.Gauges[j].Hash + }) + + summary.Counters = formatSamples(interval.Counters) + summary.Samples = formatSamples(interval.Samples) + + return summary +} + +func formatSamples(source map[string]SampledValue) []SampledValue { + output := make([]SampledValue, 0, len(source)) + for hash, sample := range source { + displayLabels := make(map[string]string) + for _, label := range sample.Labels { + displayLabels[label.Name] = label.Value + } + + output = append(output, SampledValue{ + Name: sample.Name, + Hash: hash, + AggregateSample: sample.AggregateSample, + Mean: sample.AggregateSample.Mean(), + Stddev: sample.AggregateSample.Stddev(), + DisplayLabels: displayLabels, + }) + } + sort.Slice(output, func(i, j int) bool { + return output[i].Hash < output[j].Hash + }) + + return output +} + +type Encoder interface { + Encode(interface{}) error +} + +// Stream writes metrics using encoder.Encode each time an interval ends. Runs +// until the request context is cancelled, or the encoder returns an error. +// The caller is responsible for logging any errors from encoder. 
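+// +// Any value with an Encode(interface{}) error method satisfies Encoder; +// *json.Encoder is one such type. A sketch of streaming newline-delimited +// JSON over HTTP (illustrative only): +// +//	func handler(w http.ResponseWriter, r *http.Request) { +//		sink.Stream(r.Context(), json.NewEncoder(w)) +//	}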
+func (i *InmemSink) Stream(ctx context.Context, encoder Encoder) { + interval := i.getInterval() + + for { + select { + case <-interval.done: + summary := newMetricSummaryFromInterval(interval) + if err := encoder.Encode(summary); err != nil { + return + } + + // update interval to the next one + interval = i.getInterval() + case <-ctx.Done(): + return + } + } +} diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go new file mode 100644 index 0000000000..0937f4aedf --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_signal.go @@ -0,0 +1,117 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "os" + "os/signal" + "strings" + "sync" + "syscall" +) + +// InmemSignal is used to listen for a given signal, and when received, +// to dump the current metrics from the InmemSink to an io.Writer +type InmemSignal struct { + signal syscall.Signal + inm *InmemSink + w io.Writer + sigCh chan os.Signal + + stop bool + stopCh chan struct{} + stopLock sync.Mutex +} + +// NewInmemSignal creates a new InmemSignal which listens for a given signal, +// and dumps the current metrics out to a writer +func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { + i := &InmemSignal{ + signal: sig, + inm: inmem, + w: w, + sigCh: make(chan os.Signal, 1), + stopCh: make(chan struct{}), + } + signal.Notify(i.sigCh, sig) + go i.run() + return i +} + +// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 +// and writes output to stderr. Windows uses SIGBREAK +func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { + return NewInmemSignal(inmem, DefaultSignal, os.Stderr) +} + +// Stop is used to stop the InmemSignal from listening +func (i *InmemSignal) Stop() { + i.stopLock.Lock() + defer i.stopLock.Unlock() + + if i.stop { + return + } + i.stop = true + close(i.stopCh) + signal.Stop(i.sigCh) +} + +// run is a long running routine that handles signals +func (i *InmemSignal) run() { + for { + select { + case <-i.sigCh: + i.dumpStats() + case <-i.stopCh: + return + } + } +} + +// dumpStats is used to dump the data to output writer +func (i *InmemSignal) dumpStats() { + buf := bytes.NewBuffer(nil) + + data := i.inm.Data() + // Skip the last period which is still being aggregated + for j := 0; j < len(data)-1; j++ { + intv := data[j] + intv.RLock() + for _, val := range intv.Gauges { + name := i.flattenLabels(val.Name, val.Labels) + fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value) + } + for name, vals := range intv.Points { + for _, val := range vals { + fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) + } + } + for _, agg := range intv.Counters { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + for _, agg := range intv.Samples { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + intv.RUnlock() + } + + // Write out the bytes + i.w.Write(buf.Bytes()) +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSignal) flattenLabels(name string, labels []Label) string { + buf := bytes.NewBufferString(name) + replacer := strings.NewReplacer(" ", "_", ":", "_") + + for _, label := range labels { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, label.Value) + } + + return buf.String() +} diff --git 
a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go new file mode 100644 index 0000000000..36642a4293 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/metrics.go @@ -0,0 +1,299 @@ +package metrics + +import ( + "runtime" + "strings" + "time" + + iradix "github.com/hashicorp/go-immutable-radix" +) + +type Label struct { + Name string + Value string +} + +func (m *Metrics) SetGauge(key []string, val float32) { + m.SetGaugeWithLabels(key, val, nil) +} + +func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" { + if m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } else if m.EnableHostname { + key = insert(0, m.HostName, key) + } + } + if m.EnableTypePrefix { + key = insert(0, "gauge", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.SetGaugeWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) EmitKey(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "kv", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + allowed, _ := m.allowMetric(key, nil) + if !allowed { + return + } + m.sink.EmitKey(key, val) +} + +func (m *Metrics) IncrCounter(key []string, val float32) { + m.IncrCounterWithLabels(key, val, nil) +} + +func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "counter", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.IncrCounterWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) AddSample(key []string, val float32) { + m.AddSampleWithLabels(key, val, nil) +} + +func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "sample", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.AddSampleWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) MeasureSince(key []string, start time.Time) { + m.MeasureSinceWithLabels(key, start, nil) +} + +func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "timer", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + now := time.Now() + elapsed := now.Sub(start) + msec := float32(elapsed.Nanoseconds()) / 
float32(m.TimerGranularity) + m.sink.AddSampleWithLabels(key, msec, labelsFiltered) +} + +// UpdateFilter overwrites the existing filter with the given rules. +func (m *Metrics) UpdateFilter(allow, block []string) { + m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels) +} + +// UpdateFilterAndLabels overwrites the existing filter with the given rules. +func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + m.filterLock.Lock() + defer m.filterLock.Unlock() + + m.AllowedPrefixes = allow + m.BlockedPrefixes = block + + if allowedLabels == nil { + // Having a white list means we take only elements from it + m.allowedLabels = nil + } else { + m.allowedLabels = make(map[string]bool) + for _, v := range allowedLabels { + m.allowedLabels[v] = true + } + } + m.blockedLabels = make(map[string]bool) + for _, v := range blockedLabels { + m.blockedLabels[v] = true + } + m.AllowedLabels = allowedLabels + m.BlockedLabels = blockedLabels + + m.filter = iradix.New() + for _, prefix := range m.AllowedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), true) + } + for _, prefix := range m.BlockedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), false) + } +} + +func (m *Metrics) Shutdown() { + if ss, ok := m.sink.(ShutdownSink); ok { + ss.Shutdown() + } +} + +// labelIsAllowed returns true if the label should be included in the metric; +// the caller should hold m.filterLock while calling this method +func (m *Metrics) labelIsAllowed(label *Label) bool { + labelName := (*label).Name + if m.blockedLabels != nil { + _, ok := m.blockedLabels[labelName] + if ok { + // If present, let's remove this label + return false + } + } + if m.allowedLabels != nil { + _, ok := m.allowedLabels[labelName] + return ok + } + // Allow by default + return true +} + +// filterLabels returns only the allowed labels; +// the caller should hold m.filterLock while calling this method +func (m *Metrics) filterLabels(labels []Label) []Label { + if labels == nil { + return nil + } + toReturn := []Label{} + for _, label := range labels { + if m.labelIsAllowed(&label) { + toReturn = append(toReturn, label) + } + } + return toReturn +} + +// Returns whether the metric should be allowed based on configured prefix filters +// Also returns the applicable labels +func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) { + m.filterLock.RLock() + defer m.filterLock.RUnlock() + + if m.filter == nil || m.filter.Len() == 0 { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + _, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, "."))) + if !ok { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + return allowed.(bool), m.filterLabels(labels) +} + +// Periodically collects runtime stats to publish +func (m *Metrics) collectStats() { + for { + time.Sleep(m.ProfileInterval) + m.EmitRuntimeStats() + } +} + +// Emits various runtime statistics +func (m *Metrics) EmitRuntimeStats() { + // Export number of Goroutines + numRoutines := runtime.NumGoroutine() + m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) + + // Export memory stats + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) + m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) + m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) + m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) + m.SetGauge([]string{"runtime",
"heap_objects"}, float32(stats.HeapObjects)) + m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) + m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) + + // Export info about the last few GC runs + num := stats.NumGC + + // Handle wrap around + if num < m.lastNumGC { + m.lastNumGC = 0 + } + + // Ensure we don't scan more than 256 + if num-m.lastNumGC >= 256 { + m.lastNumGC = num - 255 + } + + for i := m.lastNumGC; i < num; i++ { + pause := stats.PauseNs[i%256] + m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) + } + m.lastNumGC = num +} + +// Creates a new slice with the provided string value as the first element +// and the provided slice values as the remaining values. +// Ordering of the values in the provided input slice is kept in tact in the output slice. +func insert(i int, v string, s []string) []string { + // Allocate new slice to avoid modifying the input slice + newS := make([]string, len(s)+1) + + // Copy s[0, i-1] into newS + for j := 0; j < i; j++ { + newS[j] = s[j] + } + + // Insert provided element at index i + newS[i] = v + + // Copy s[i, len(s)-1] into newS starting at newS[i+1] + for j := i; j < len(s); j++ { + newS[j+1] = s[j] + } + + return newS +} diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go new file mode 100644 index 0000000000..6f4108ff40 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/sink.go @@ -0,0 +1,132 @@ +package metrics + +import ( + "fmt" + "net/url" +) + +// The MetricSink interface is used to transmit metrics information +// to an external system +type MetricSink interface { + // A Gauge should retain the last value it is set to + SetGauge(key []string, val float32) + SetGaugeWithLabels(key []string, val float32, labels []Label) + + // Should emit a Key/Value pair for each call + EmitKey(key []string, val float32) + + // Counters should accumulate values + IncrCounter(key []string, val float32) + IncrCounterWithLabels(key []string, val float32, labels []Label) + + // Samples are for timing information, where quantiles are used + AddSample(key []string, val float32) + AddSampleWithLabels(key []string, val float32, labels []Label) +} + +type ShutdownSink interface { + MetricSink + + // Shutdown the metric sink, flush metrics to storage, and cleanup resources. + // Called immediately prior to application exit. Implementations must block + // until metrics are flushed to storage. 
+ Shutdown() +} + +// BlackholeSink is used to just blackhole messages +type BlackholeSink struct{} + +func (*BlackholeSink) SetGauge(key []string, val float32) {} +func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) EmitKey(key []string, val float32) {} +func (*BlackholeSink) IncrCounter(key []string, val float32) {} +func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) AddSample(key []string, val float32) {} +func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {} + +// FanoutSink is used to fan out values to multiple sinks +type FanoutSink []MetricSink + +func (fh FanoutSink) SetGauge(key []string, val float32) { + fh.SetGaugeWithLabels(key, val, nil) +} + +func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.SetGaugeWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) EmitKey(key []string, val float32) { + for _, s := range fh { + s.EmitKey(key, val) + } +} + +func (fh FanoutSink) IncrCounter(key []string, val float32) { + fh.IncrCounterWithLabels(key, val, nil) +} + +func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.IncrCounterWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) AddSample(key []string, val float32) { + fh.AddSampleWithLabels(key, val, nil) +} + +func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.AddSampleWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) Shutdown() { + for _, s := range fh { + if ss, ok := s.(ShutdownSink); ok { + ss.Shutdown() + } + } +} + +// sinkURLFactoryFunc is a generic interface around the *SinkFromURL() function provided +// by each sink type +type sinkURLFactoryFunc func(*url.URL) (MetricSink, error) + +// sinkRegistry supports the generic NewMetricSinkFromURL function by mapping URL +// schemes to metric sink factory functions +var sinkRegistry = map[string]sinkURLFactoryFunc{ + "statsd": NewStatsdSinkFromURL, + "statsite": NewStatsiteSinkFromURL, + "inmem": NewInmemSinkFromURL, +} + +// NewMetricSinkFromURL allows a generic URL input to configure any of the +// supported sinks. The scheme of the URL identifies the type of the sink, and the +// query parameters are used to set options. +// +// "statsd://" - Initializes a StatsdSink. The host and port are passed through +// as the "addr" of the sink +// +// "statsite://" - Initializes a StatsiteSink. The host and port become the +// "addr" of the sink +// +// "inmem://" - Initializes an InmemSink. The host and port are ignored. The +// "interval" and "retain" query parameters must be specified with valid +// durations, see NewInmemSink for details.
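+// +// A short sketch (the addresses shown are placeholders, not defaults): +// +//	sink, err := NewMetricSinkFromURL("statsd://127.0.0.1:8125") +//	inm, err := NewMetricSinkFromURL("inmem://?interval=10s&retain=1m")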
+func NewMetricSinkFromURL(urlStr string) (MetricSink, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + sinkURLFactoryFunc := sinkRegistry[u.Scheme] + if sinkURLFactoryFunc == nil { + return nil, fmt.Errorf( + "cannot create metric sink, unrecognized sink name: %q", u.Scheme) + } + + return sinkURLFactoryFunc(u) +} diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go new file mode 100644 index 0000000000..38976f8dc9 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/start.go @@ -0,0 +1,158 @@ +package metrics + +import ( + "os" + "sync" + "sync/atomic" + "time" + + iradix "github.com/hashicorp/go-immutable-radix" +) + +// Config is used to configure metrics settings +type Config struct { + ServiceName string // Prefixed with keys to separate services + HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname + EnableHostname bool // Enable prefixing gauge values with hostname + EnableHostnameLabel bool // Enable adding hostname to labels + EnableServiceLabel bool // Enable adding service to labels + EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) + EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") + TimerGranularity time.Duration // Granularity of timers. + ProfileInterval time.Duration // Interval to profile runtime metrics + + AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator + BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator + AllowedLabels []string // A list of metric labels to allow, with '.' as the separator + BlockedLabels []string // A list of metric labels to block, with '.' as the separator + FilterDefault bool // Whether to allow metrics by default +} + +// Metrics represents an instance of a metrics sink that can +// be used to emit metrics +type Metrics struct { + Config + lastNumGC uint32 + sink MetricSink + filter *iradix.Tree + allowedLabels map[string]bool + blockedLabels map[string]bool + filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access +} + +// Shared global metrics instance +var globalMetrics atomic.Value // *Metrics + +func init() { + // Initialize to a blackhole sink to avoid errors + globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) +} + +// Default returns the shared global metrics instance.
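+// +// For example (illustrative), this is equivalent to using the package-level +// helpers: +// +//	metrics.Default().SetGauge([]string{"queue", "depth"}, 12)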
+func Default() *Metrics { + return globalMetrics.Load().(*Metrics) +} + +// DefaultConfig provides a sane default configuration +func DefaultConfig(serviceName string) *Config { + c := &Config{ + ServiceName: serviceName, // Use client provided service + HostName: "", + EnableHostname: true, // Enable hostname prefix + EnableRuntimeMetrics: true, // Enable runtime profiling + EnableTypePrefix: false, // Disable type prefix + TimerGranularity: time.Millisecond, // Timers are in milliseconds + ProfileInterval: time.Second, // Poll runtime every second + FilterDefault: true, // Don't filter metrics by default + } + + // Try to get the hostname + name, _ := os.Hostname() + c.HostName = name + return c +} + +// New is used to create a new instance of Metrics +func New(conf *Config, sink MetricSink) (*Metrics, error) { + met := &Metrics{} + met.Config = *conf + met.sink = sink + met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels) + + // Start the runtime collector + if conf.EnableRuntimeMetrics { + go met.collectStats() + } + return met, nil +} + +// NewGlobal is the same as New, but it assigns the metrics object to be +// used globally as well as returning it. +func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { + metrics, err := New(conf, sink) + if err == nil { + globalMetrics.Store(metrics) + } + return metrics, err +} + +// Proxy all the methods to the globalMetrics instance +func SetGauge(key []string, val float32) { + globalMetrics.Load().(*Metrics).SetGauge(key, val) +} + +func SetGaugeWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels) +} + +func EmitKey(key []string, val float32) { + globalMetrics.Load().(*Metrics).EmitKey(key, val) +} + +func IncrCounter(key []string, val float32) { + globalMetrics.Load().(*Metrics).IncrCounter(key, val) +} + +func IncrCounterWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels) +} + +func AddSample(key []string, val float32) { + globalMetrics.Load().(*Metrics).AddSample(key, val) +} + +func AddSampleWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels) +} + +func MeasureSince(key []string, start time.Time) { + globalMetrics.Load().(*Metrics).MeasureSince(key, start) +} + +func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels) +} + +func UpdateFilter(allow, block []string) { + globalMetrics.Load().(*Metrics).UpdateFilter(allow, block) +} + +// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels +// and blockedLabels - when not nil - allow filtering of labels in order to +// block/allow globally labels (especially useful when having large number of +// values for a given label). See README.md for more information about usage. +func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels) +} + +// Shutdown disables metric collection, then blocks while attempting to flush metrics to storage. +// WARNING: Not all MetricSink backends support this functionality, and calling this will cause them to leak resources. +// This is intended for use immediately prior to application exit. 
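+//
+// A minimal wiring sketch (hypothetical service name and address; error
+// handling elided):
+//
+//	sink, _ := metrics.NewStatsdSink("127.0.0.1:8125")
+//	_, _ = metrics.NewGlobal(metrics.DefaultConfig("my-service"), sink)
+//	defer metrics.Shutdown()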
+func Shutdown() { + m := globalMetrics.Load().(*Metrics) + // Swap whatever MetricSink is currently active with a BlackholeSink. Callers must not have a + // reason to expect that calls to the library will successfully collect metrics after Shutdown + // has been called. + globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) + m.Shutdown() +} diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go new file mode 100644 index 0000000000..1bfffce46e --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsd.go @@ -0,0 +1,184 @@ +package metrics + +import ( + "bytes" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // statsdMaxLen is the maximum size of a packet + // to send to statsd + statsdMaxLen = 1400 +) + +// StatsdSink provides a MetricSink that can be used +// with a statsite or statsd metrics server. It uses +// only UDP packets, while StatsiteSink uses TCP. +type StatsdSink struct { + addr string + metricQueue chan string +} + +// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsdSink(u.Host) +} + +// NewStatsdSink is used to create a new StatsdSink +func NewStatsdSink(addr string) (*StatsdSink, error) { + s := &StatsdSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsd +func (s *StatsdSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsdSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsdSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsdSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsdSink) flushMetrics() { + 
var sock net.Conn + var err error + var wait <-chan time.Time + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Create a buffer + buf := bytes.NewBuffer(nil) + + // Attempt to connect + sock, err = net.Dial("udp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsd! Err: %s", err) + goto WAIT + } + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Check if this would overflow the packet size + if len(metric)+buf.Len() > statsdMaxLen { + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error writing to statsd! Err: %s", err) + goto WAIT + } + } + + // Append to the buffer + buf.WriteString(metric) + + case <-ticker.C: + if buf.Len() == 0 { + continue + } + + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error flushing to statsd! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go new file mode 100644 index 0000000000..6c0d284d2d --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsite.go @@ -0,0 +1,172 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // We force flush the statsite metrics after this period of + // inactivity. Prevents stats from getting stuck in a buffer + // forever. + flushInterval = 100 * time.Millisecond +) + +// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. 
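+//
+// For example (a sketch; the address is hypothetical), the TCP-based
+// statsite sink is reached through the same generic entry point:
+//
+//	sink, err := NewMetricSinkFromURL("statsite://statsite.internal:8125")
+//	if err != nil {
+//		panic(err)
+	//	}
+//	sink.SetGauge([]string{"queue", "depth"}, 42)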
+func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsiteSink(u.Host) +} + +// StatsiteSink provides a MetricSink that can be used with a +// statsite metrics server +type StatsiteSink struct { + addr string + metricQueue chan string +} + +// NewStatsiteSink is used to create a new StatsiteSink +func NewStatsiteSink(addr string) (*StatsiteSink, error) { + s := &StatsiteSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsite +func (s *StatsiteSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsiteSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsiteSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsiteSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsiteSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + var buffered *bufio.Writer + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Attempt to connect + sock, err = net.Dial("tcp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsite! Err: %s", err) + goto WAIT + } + + // Create a buffered writer + buffered = bufio.NewWriter(sock) + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Try to send to statsite + _, err := buffered.Write([]byte(metric)) + if err != nil { + log.Printf("[ERR] Error writing to statsite! Err: %s", err) + goto WAIT + } + case <-ticker.C: + if err := buffered.Flush(); err != nil { + log.Printf("[ERR] Error flushing to statsite! 
Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/cilium/cilium/AUTHORS b/vendor/github.com/cilium/cilium/AUTHORS index 46b6589605..99ff1bfa0c 100644 --- a/vendor/github.com/cilium/cilium/AUTHORS +++ b/vendor/github.com/cilium/cilium/AUTHORS @@ -97,7 +97,7 @@ Bernard Halas bernard.halas@berops.com Bill Mulligan billmulligan516@gmail.com Bingshen Wang bingshen.wbs@alibaba-inc.com Bingwu Yang detailyang@gmail.com -Birol Bilgin birol@cilium.io +Birol Bilgin birolbilgin@gmail.com Bob Bouteillier bob.bouteillier@datadoghq.com Bokang Li libokang.dev@gmail.com Bolun Zhao blzhao@google.com @@ -254,6 +254,7 @@ Frederic Branczyk fbranczyk@gmail.com Fred Hsu fredlhsu@gmail.com Fredrik Lönnegren fredrik.lonnegren@gmail.com Fulvio Risso fulvio.risso@polito.it +gailsuccess 157372272+gailsuccess@users.noreply.github.com Gaurav Genani h3llix.pvt@gmail.com Gaurav Yadav gaurav.dev.iiitm@gmail.com Gavin McNair gavin.mcnair@kaluza.com @@ -382,6 +383,7 @@ Kornilios Kourtis kornilios@isovalent.com kwakubiney kebiney@hotmail.com Laurent Bernaille laurent.bernaille@datadoghq.com Lawrence Gadban lawrence.gadban@solo.io +ldelossa louis.delos@gmail.com Lehner Florian dev@der-flo.net Leonard Cohnen lc@edgeless.systems leonliao xiaobo.liao@gmail.com @@ -406,7 +408,7 @@ lou-lan loulan@loulan.me Lucas Leblow lucasleblow@mailbox.org lucming 2876757716@qq.com Ludovic Ortega ludovic.ortega@adminafk.fr -Maartje Eyskens maartje.eyskens@isovalent.com +Maartje Eyskens maartje@eyskens.me Maciej Fijalkowski maciej.fijalkowski@intel.com Maciej Kwiek maciej@isovalent.com Maciej Skrocki maciejskrocki@google.com @@ -652,7 +654,7 @@ Sven Haardiek sven.haardiek@uni-muenster.de Swaminathan Vasudevan svasudevan@suse.com Taeung Song treeze.taeung@gmail.com Takayoshi Nishida takayoshi.nishida@gmail.com -Tamilmani tamanoha@microsoft.com +Tamilmani tamanoha@microsoft.comwq Tam Mach tam.mach@cilium.io Tasdik Rahman prodicus@outlook.com Te-Yu Chang dale.teyuchang@gmail.com diff --git a/vendor/github.com/cilium/cilium/daemon/k8s/init.go b/vendor/github.com/cilium/cilium/daemon/k8s/init.go new file mode 100644 index 0000000000..381b8ffac2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/daemon/k8s/init.go @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package k8s abstracts all Kubernetes specific behaviour +package k8s + +import ( + "context" + "fmt" + + "github.com/sirupsen/logrus" + + ipamOption "github.com/cilium/cilium/pkg/ipam/option" + "github.com/cilium/cilium/pkg/k8s" + k8sConst "github.com/cilium/cilium/pkg/k8s/constants" + "github.com/cilium/cilium/pkg/k8s/resource" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/node" + nodeTypes "github.com/cilium/cilium/pkg/node/types" + "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/source" + "github.com/cilium/cilium/pkg/time" +) + +func retrieveNodeInformation(ctx context.Context, log logrus.FieldLogger, localNodeResource LocalNodeResource, localCiliumNodeResource LocalCiliumNodeResource) *nodeTypes.Node { + var n *nodeTypes.Node + waitForCIDR := func() 
error {
+		if option.Config.K8sRequireIPv4PodCIDR && n.IPv4AllocCIDR == nil {
+			return fmt.Errorf("required IPv4 PodCIDR not available")
+		}
+		if option.Config.K8sRequireIPv6PodCIDR && n.IPv6AllocCIDR == nil {
+			return fmt.Errorf("required IPv6 PodCIDR not available")
+		}
+		return nil
+	}
+
+	if option.Config.IPAM == ipamOption.IPAMClusterPool {
+		for event := range localCiliumNodeResource.Events(ctx) {
+			if event.Kind == resource.Upsert {
+				no := nodeTypes.ParseCiliumNode(event.Object)
+				n = &no
+				log.WithField(logfields.NodeName, n.Name).Info("Retrieved node information from cilium node")
+				if err := waitForCIDR(); err != nil {
+					log.WithError(err).Warning("Waiting for k8s node information")
+				} else {
+					event.Done(nil)
+					break
+				}
+			}
+			event.Done(nil)
+		}
+	} else {
+		for event := range localNodeResource.Events(ctx) {
+			if event.Kind == resource.Upsert {
+				n = k8s.ParseNode(event.Object, source.Unspec)
+				log.WithField(logfields.NodeName, n.Name).Info("Retrieved node information from kubernetes node")
+				if err := waitForCIDR(); err != nil {
+					log.WithError(err).Warning("Waiting for k8s node information")
+				} else {
+					event.Done(nil)
+					break
+				}
+			}
+			event.Done(nil)
+		}
+	}
+
+	return n
+}
+
+// useNodeCIDR sets the ipv4-range and ipv6-range values from the
+// addresses defined in the given node.
+func useNodeCIDR(n *nodeTypes.Node) {
+	if n.IPv4AllocCIDR != nil && option.Config.EnableIPv4 {
+		node.SetIPv4AllocRange(n.IPv4AllocCIDR)
+	}
+	if n.IPv6AllocCIDR != nil && option.Config.EnableIPv6 {
+		node.SetIPv6NodeRange(n.IPv6AllocCIDR)
+	}
+}
+
+// WaitForNodeInformation retrieves the node information via the CiliumNode or
+// Kubernetes Node resource. This function will block until the information is
+// received.
+func WaitForNodeInformation(ctx context.Context, log logrus.FieldLogger, localNode LocalNodeResource, localCiliumNode LocalCiliumNodeResource) error {
+	// Use of the environment variable overwrites the node-name
+	// automatically derived
+	nodeName := nodeTypes.GetName()
+	if nodeName == "" {
+		if option.Config.K8sRequireIPv4PodCIDR || option.Config.K8sRequireIPv6PodCIDR {
+			return fmt.Errorf("node name must be specified via environment variable '%s' to retrieve Kubernetes PodCIDR range", k8sConst.EnvNodeNameSpec)
+		}
+		if option.MightAutoDetectDevices() {
+			log.Info("K8s node name is empty. BPF NodePort might not be able to auto detect all devices")
+		}
+		return nil
+	}
+
+	requireIPv4CIDR := option.Config.K8sRequireIPv4PodCIDR
+	requireIPv6CIDR := option.Config.K8sRequireIPv6PodCIDR
+	// If no CIDR is required, retrieving the node information is
+	// optional
+	// At this point it's not clear whether the device auto-detection will
+	// happen, as initKubeProxyReplacementOptions() might disable BPF NodePort.
+	// Anyway, to be on the safe side, don't give up waiting for a (Cilium)Node
+	// self object.
+	isNodeInformationOptional := (!requireIPv4CIDR && !requireIPv6CIDR && !option.MightAutoDetectDevices())
+	// If node information is optional, let's wait 10 seconds only.
+	// If node information is required, wait indefinitely.
+ if isNodeInformationOptional { + newCtx, cancel := context.WithTimeout(ctx, time.Second*10) + ctx = newCtx + defer cancel() + } + + if n := retrieveNodeInformation(ctx, log, localNode, localCiliumNode); n != nil { + nodeIP4 := n.GetNodeIP(false) + nodeIP6 := n.GetNodeIP(true) + k8sNodeIP := n.GetK8sNodeIP() + + log.WithFields(logrus.Fields{ + logfields.NodeName: n.Name, + logfields.Labels: logfields.Repr(n.Labels), + logfields.IPAddr + ".ipv4": nodeIP4, + logfields.IPAddr + ".ipv6": nodeIP6, + logfields.V4Prefix: n.IPv4AllocCIDR, + logfields.V6Prefix: n.IPv6AllocCIDR, + logfields.K8sNodeIP: k8sNodeIP, + }).Info("Received own node information from API server") + + useNodeCIDR(n) + restoreRouterHostIPs(n, log) + } else { + // if node resource could not be received, fail if + // PodCIDR requirement has been requested + if requireIPv4CIDR || requireIPv6CIDR { + return fmt.Errorf("unable to derive PodCIDR via Node or CiliumNode resource") + } + } + + // Annotate addresses will occur later since the user might + // want to specify them manually + return nil +} + +// restoreRouterHostIPs restores (sets) the router IPs found from the +// Kubernetes resource. +// +// Note that it does not validate the correctness of the IPs, as that is done +// later in the daemon initialization when node.AutoComplete() is called. +func restoreRouterHostIPs(n *nodeTypes.Node, log logrus.FieldLogger) { + if !option.Config.EnableHostIPRestore { + return + } + + router4 := n.GetCiliumInternalIP(false) + router6 := n.GetCiliumInternalIP(true) + if router4 != nil { + node.SetInternalIPv4Router(router4) + } + if router6 != nil { + node.SetIPv6Router(router6) + } + if router4 != nil || router6 != nil { + log.WithFields(logrus.Fields{ + logfields.IPv4: router4, + logfields.IPv6: router6, + }).Info("Restored router IPs from node information") + } +} diff --git a/vendor/github.com/cilium/cilium/daemon/k8s/resources.go b/vendor/github.com/cilium/cilium/daemon/k8s/resources.go new file mode 100644 index 0000000000..9579d5dd96 --- /dev/null +++ b/vendor/github.com/cilium/cilium/daemon/k8s/resources.go @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package k8s + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + + "github.com/cilium/cilium/pkg/hive/cell" + "github.com/cilium/cilium/pkg/k8s" + cilium_api_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + cilium_api_v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + "github.com/cilium/cilium/pkg/k8s/client" + "github.com/cilium/cilium/pkg/k8s/resource" + slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + slim_networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1" + "github.com/cilium/cilium/pkg/k8s/types" + nodeTypes "github.com/cilium/cilium/pkg/node/types" +) + +var ( + // ResourcesCell provides a set of handles to Kubernetes resources used throughout the + // agent. Each of the resources share a client-go informer and backing store so we only + // have one watch API call for each resource kind and that we maintain only one copy of each object. + // + // See pkg/k8s/resource/resource.go for documentation on the Resource[T] type. 
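+	//
+	// A consumption sketch (hypothetical consumer; it mirrors how
+	// retrieveNodeInformation in daemon/k8s/init.go drains events): a
+	// Resource[T] is read as an event stream, and every event must be
+	// acknowledged with Done:
+	//
+	//	for ev := range localNode.Events(ctx) {
+	//		if ev.Kind == resource.Upsert {
+	//			log.WithField("node", ev.Object.Name).Info("node updated")
+	//		}
+	//		ev.Done(nil)
+	//	}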
+ ResourcesCell = cell.Module( + "k8s-resources", + "Agent Kubernetes resources", + + cell.Config(k8s.DefaultConfig), + LocalNodeCell, + cell.Provide( + k8s.ServiceResource, + k8s.EndpointsResource, + k8s.NamespaceResource, + k8s.NetworkPolicyResource, + k8s.CiliumNetworkPolicyResource, + k8s.CiliumClusterwideNetworkPolicyResource, + k8s.CiliumCIDRGroupResource, + k8s.CiliumNodeResource, + k8s.CiliumSlimEndpointResource, + k8s.CiliumEndpointSliceResource, + ), + ) + + LocalNodeCell = cell.Module( + "k8s-local-node-resources", + "Agent Kubernetes local node resources", + + cell.Provide( + func(lc cell.Lifecycle, cs client.Clientset) (LocalNodeResource, error) { + return k8s.NodeResource( + lc, cs, + func(opts *metav1.ListOptions) { + opts.FieldSelector = fields.ParseSelectorOrDie("metadata.name=" + nodeTypes.GetName()).String() + }, + ) + }, + func(lc cell.Lifecycle, cs client.Clientset) (LocalCiliumNodeResource, error) { + return k8s.CiliumNodeResource( + lc, cs, + func(opts *metav1.ListOptions) { + opts.FieldSelector = fields.ParseSelectorOrDie("metadata.name=" + nodeTypes.GetName()).String() + }, + ) + }, + func(lc cell.Lifecycle, cs client.Clientset) (LocalPodResource, error) { + return k8s.PodResource( + lc, cs, + func(opts *metav1.ListOptions) { + opts.FieldSelector = fields.ParseSelectorOrDie("spec.nodeName=" + nodeTypes.GetName()).String() + }, + ) + }, + ), + ) +) + +// LocalNodeResource is a resource.Resource[*slim_corev1.Node] but one which will only stream updates for the node object +// associated with the node we are currently running on. +type LocalNodeResource resource.Resource[*slim_corev1.Node] + +// LocalCiliumNodeResource is a resource.Resource[*cilium_api_v2.CiliumNode] but one which will only stream updates for the +// CiliumNode object associated with the node we are currently running on. +type LocalCiliumNodeResource resource.Resource[*cilium_api_v2.CiliumNode] + +// LocalPodResource is a resource.Resource[*slim_corev1.Pod] but one which will only stream updates for pod +// objects scheduled on the node we are currently running on. +type LocalPodResource resource.Resource[*slim_corev1.Pod] + +// Resources is a convenience struct to group all the agent k8s resources as cell constructor parameters. +type Resources struct { + cell.In + + Services resource.Resource[*slim_corev1.Service] + Endpoints resource.Resource[*k8s.Endpoints] + LocalNode LocalNodeResource + LocalCiliumNode LocalCiliumNodeResource + LocalPods LocalPodResource + Namespaces resource.Resource[*slim_corev1.Namespace] + NetworkPolicies resource.Resource[*slim_networkingv1.NetworkPolicy] + CiliumNetworkPolicies resource.Resource[*cilium_api_v2.CiliumNetworkPolicy] + CiliumClusterwideNetworkPolicies resource.Resource[*cilium_api_v2.CiliumClusterwideNetworkPolicy] + CiliumCIDRGroups resource.Resource[*cilium_api_v2alpha1.CiliumCIDRGroup] + CiliumSlimEndpoint resource.Resource[*types.CiliumEndpoint] + CiliumEndpointSlice resource.Resource[*cilium_api_v2alpha1.CiliumEndpointSlice] +} + +// LocalNodeResources is a convenience struct to group CiliumNode and Node resources as cell constructor parameters. 
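+//
+// A dependent constructor can then take the bundle as a single parameter
+// (a sketch; newNodeWatcher and nodeWatcher are hypothetical):
+//
+//	func newNodeWatcher(r LocalNodeResources) *nodeWatcher {
+//		return &nodeWatcher{node: r.LocalNode, ciliumNode: r.LocalCiliumNode}
+//	}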
+type LocalNodeResources struct { + cell.In + + LocalNode LocalNodeResource + LocalCiliumNode LocalCiliumNodeResource +} diff --git a/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go b/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go new file mode 100644 index 0000000000..0e6e4669b9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go @@ -0,0 +1,1069 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package allocator + +import ( + "context" + "errors" + "fmt" + + "github.com/google/uuid" + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/backoff" + "github.com/cilium/cilium/pkg/idpool" + "github.com/cilium/cilium/pkg/inctimer" + "github.com/cilium/cilium/pkg/kvstore" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/rate" + "github.com/cilium/cilium/pkg/time" +) + +var ( + log = logging.DefaultLogger.WithField(logfields.LogSubsys, "allocator") +) + +const ( + // maxAllocAttempts is the number of attempted allocation requests + // performed before failing. + maxAllocAttempts = 16 +) + +// Allocator is a distributed ID allocator backed by a KVstore. It maps +// arbitrary keys to identifiers. Multiple users on different cluster nodes can +// in parallel request the ID for keys and are guaranteed to retrieve the same +// ID for an identical key. +// +// While the details of how keys are stored is delegated to Backend +// implementations, some expectations exist. See pkg/kvstore/allocator for +// details about the kvstore implementation. +// +// A node takes a reference to an identity when it is in-use on that node, and +// the identity remains in-use if there is any node reference to it. When an +// identity no longer has any node references, it may be garbage collected. No +// guarantees are made at that point and the numeric identity may be reused. +// Note that the numeric IDs are selected locally and verified with the Backend. +// +// Lookup ID by key: +// 1. Return ID from local cache updated by watcher (no Backend interactions) +// 2. Do ListPrefix() on slave key excluding node suffix, return the first +// result that matches the exact prefix. +// +// Lookup key by ID: +// 1. Return key from local cache updated by watcher (no Backend interactions) +// 2. Do Get() on master key, return result +// +// Allocate: +// 1. Check local key cache, increment, and return if key is already in use +// locally (no Backend interactions) +// 2. Check local cache updated by watcher, if... +// +// ... match found: +// +// 2.1 Create a new slave key. This operation is potentially racy as the master +// key can be removed in the meantime. +// - etcd: Create is made conditional on existence of master key +// - consul: locking +// +// ... match not found: +// +// 2.1 Select new unused id from local cache +// 2.2 Create a new master key with the condition that it may not exist +// 2.3 Create a new slave key +// +// 1.1. If found, increment and return (no Backend interactions) +// 2. Lookup ID by key in local cache or via first slave key found in Backend +// +// Release: +// 1. Reduce local reference count until last use (no Backend interactions) +// 2. 
Delete slave key (basePath/value/key1/node1)
+// This automatically guarantees that when the last node has released the
+// key, the key is no longer found by Get()
+// 3. If the node goes down, all slave keys of that node are removed after
+// the TTL expires (auto release).
+type Allocator struct {
+	// events is a channel which will receive AllocatorEvent as IDs are
+	// added, modified or removed from the allocator
+	events AllocatorEventSendChan
+
+	// keyType is an instance of the type to be used as allocator key.
+	keyType AllocatorKey
+
+	// min is the lower limit when allocating IDs. The allocator will never
+	// allocate an ID lesser than this value.
+	min idpool.ID
+
+	// max is the upper limit when allocating IDs. The allocator will never
+	// allocate an ID greater than this value.
+	max idpool.ID
+
+	// prefixMask if set, will be ORed to all selected IDs prior to
+	// allocation
+	prefixMask idpool.ID
+
+	// localKeys contains all keys including their reference count for keys
+	// which have been allocated and are in local use
+	localKeys *localKeys
+
+	// suffix is the suffix attached to keys which must be node specific,
+	// this is typically set to the node's IP address
+	suffix string
+
+	// backoffTemplate is the backoff configuration while allocating
+	backoffTemplate backoff.Exponential
+
+	// slaveKeysMutex protects the concurrent access of the slave key by this
+	// agent.
+	slaveKeysMutex lock.Mutex
+
+	// mainCache is the main cache, representing the allocator contents of
+	// the primary kvstore connection
+	mainCache cache
+
+	// remoteCachesMutex protects access to remoteCaches
+	remoteCachesMutex lock.RWMutex
+
+	// remoteCaches is the list of additional remote caches being watched
+	// in addition to the main cache
+	remoteCaches map[string]*RemoteCache
+
+	// stopGC is the channel used to stop the garbage collector
+	stopGC chan struct{}
+
+	// initialListDone is a channel that is closed when the initial
+	// synchronization has completed
+	initialListDone waitChan
+
+	// idPool maintains a pool of available ids for allocation.
+	idPool idpool.IDPool
+
+	// enableMasterKeyProtection if true, causes master keys that are still in
+	// local use to be automatically re-created
+	enableMasterKeyProtection bool
+
+	// disableGC disables the garbage collector
+	disableGC bool
+
+	// disableAutostart prevents starting the allocator when it is initialized
+	disableAutostart bool
+
+	// backend is the upstream, shared, backend to which we synchronize local
+	// information
+	backend Backend
+}
+
+// AllocatorOption is the base type for allocator options
+type AllocatorOption func(*Allocator)
+
+// NewAllocatorForGC returns an allocator that can be used to run RunGC()
+//
+// The allocator can be configured by passing in additional options:
+// - WithMin(id) - minimum ID to allocate (default: 1)
+// - WithMax(id) - maximum ID to allocate (default max(uint64))
+func NewAllocatorForGC(backend Backend, opts ...AllocatorOption) *Allocator {
+	a := &Allocator{
+		backend: backend,
+		min: idpool.ID(1),
+		max: idpool.ID(^uint64(0)),
+	}
+
+	for _, fn := range opts {
+		fn(a)
+	}
+
+	return a
+}
+
+type GCStats struct {
+	// Alive is the number of identities alive
+	Alive int
+
+	// Deleted is the number of identities deleted
+	Deleted int
+}
+
+// Backend represents clients to remote ID allocation systems, such as KV
+// Stores. These are used to coordinate key->ID allocation between cilium
+// nodes.
+type Backend interface {
+	// DeleteAllKeys will delete all keys.
It is used in tests.
+	DeleteAllKeys(ctx context.Context)
+
+	// Encode encodes a key string as required to conform to the key
+	// restrictions of the backend
+	Encode(string) string
+
+	// AllocateID creates a new key->ID association. This is expected to be a
+	// create-only operation, and the ID may be allocated by another node. An
+	// error in that case is not expected to be fatal. The actual ID is obtained
+	// by Allocator from the local idPool, which is updated with used-IDs as the
+	// Backend makes calls to the handler in ListAndWatch.
+	// The implementation of the backend might return an AllocatorKey that is
+	// a copy of 'key' with an internal reference of the backend key or, if it
+	// doesn't use the internal reference of the backend key it simply returns
+	// 'key'. In case of an error the returned 'AllocatorKey' should be nil.
+	AllocateID(ctx context.Context, id idpool.ID, key AllocatorKey) (AllocatorKey, error)
+
+	// AllocateIDIfLocked behaves like AllocateID but when lock is non-nil the
+	// operation proceeds only if it is still valid.
+	// The implementation of the backend might return an AllocatorKey that is
+	// a copy of 'key' with an internal reference of the backend key or, if it
+	// doesn't use the internal reference of the backend key it simply returns
+	// 'key'. In case of an error the returned 'AllocatorKey' should be nil.
+	AllocateIDIfLocked(ctx context.Context, id idpool.ID, key AllocatorKey, lock kvstore.KVLocker) (AllocatorKey, error)
+
+	// AcquireReference records that this node is using this key->ID mapping.
+	// This is distinct from any reference counting within this agent; only one
+	// reference exists for this node for any number of managed endpoints using
+	// it.
+	// The semantics of cleaning up stale references is delegated to the Backend
+	// implementation. RunGC may need to be invoked.
+	// This can race, and so lock can be provided (via a Lock call, below).
+	AcquireReference(ctx context.Context, id idpool.ID, key AllocatorKey, lock kvstore.KVLocker) error
+
+	// Release releases the use of an ID associated with the provided key. It
+	// does not guard against concurrent releases.
+	Release(ctx context.Context, id idpool.ID, key AllocatorKey) (err error)
+
+	// UpdateKey refreshes the record that this node is using this key -> id
+	// mapping. When reliablyMissing is set it will also recreate missing master or
+	// slave keys.
+	UpdateKey(ctx context.Context, id idpool.ID, key AllocatorKey, reliablyMissing bool) error
+
+	// UpdateKeyIfLocked behaves like UpdateKey but when lock is non-nil the operation proceeds only if it is still valid.
+	UpdateKeyIfLocked(ctx context.Context, id idpool.ID, key AllocatorKey, reliablyMissing bool, lock kvstore.KVLocker) error
+
+	// Get returns the allocated ID for this key as seen by the Backend. This may
+	// have been created by other agents.
+	Get(ctx context.Context, key AllocatorKey) (idpool.ID, error)
+
+	// GetIfLocked behaves like Get, but when lock is non-nil the
+	// operation proceeds only if it is still valid.
+	GetIfLocked(ctx context.Context, key AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error)
+
+	// GetByID returns the key associated with this ID, as seen by the Backend.
+	// This may have been created by other agents.
+	GetByID(ctx context.Context, id idpool.ID) (AllocatorKey, error)
+
+	// Lock provides an opaque lock object that can be used, later, to ensure
+	// that the key has not changed since the lock was created. This can be done
+	// with GetIfLocked.
+	Lock(ctx context.Context, key AllocatorKey) (kvstore.KVLocker, error)
+
+	// ListAndWatch begins synchronizing the local Backend instance with its
+	// remote.
+	ListAndWatch(ctx context.Context, handler CacheMutations, stopChan chan struct{})
+
+	// RunGC reaps stale or unused identities within the Backend and makes them
+	// available for reuse. It is used by the cilium-operator and is not invoked
+	// by cilium-agent.
+	// Note: not all Backend implementations rely on this, such as the kvstore
+	// backends, and may use leases to expire keys.
+	RunGC(ctx context.Context, rateLimit *rate.Limiter, staleKeysPrevRound map[string]uint64, minID idpool.ID, maxID idpool.ID) (map[string]uint64, *GCStats, error)
+
+	// RunLocksGC reaps stale or unused locks within the Backend. It is used by
+	// the cilium-operator and is not invoked by cilium-agent. Returns
+	// a map of locks currently being held in the KVStore including the ones
+	// that failed to be GCed.
+	// Note: not all Backend implementations rely on this, such as the kvstore
+	// backends, and may use leases to expire keys.
+	RunLocksGC(ctx context.Context, staleKeysPrevRound map[string]kvstore.Value) (map[string]kvstore.Value, error)
+
+	// Status returns a human-readable status of the Backend.
+	Status() (string, error)
+}
+
+// NewAllocator creates a new Allocator. Any type can be used as key as long as
+// the type implements the AllocatorKey interface. A variable of the type has
+// to be passed into NewAllocator() to make the type known. The specified base
+// path is used to prefix all keys in the kvstore. The provided path must be
+// unique.
+//
+// The allocator can be configured by passing in additional options:
+// - WithEvents() - enable Events channel
+// - WithMin(id) - minimum ID to allocate (default: 1)
+// - WithMax(id) - maximum ID to allocate (default max(uint64))
+//
+// After creation, IDs can be allocated with Allocate() and released with
+// Release()
+func NewAllocator(typ AllocatorKey, backend Backend, opts ...AllocatorOption) (*Allocator, error) {
+	a := &Allocator{
+		keyType: typ,
+		backend: backend,
+		min: idpool.ID(1),
+		max: idpool.ID(^uint64(0)),
+		localKeys: newLocalKeys(),
+		stopGC: make(chan struct{}),
+		suffix: uuid.New().String()[:10],
+		remoteCaches: map[string]*RemoteCache{},
+		backoffTemplate: backoff.Exponential{
+			Min: time.Duration(20) * time.Millisecond,
+			Factor: 2.0,
+		},
+	}
+
+	for _, fn := range opts {
+		fn(a)
+	}
+
+	a.mainCache = newCache(a)
+
+	if a.suffix == "<nil>" {
+		return nil, errors.New("allocator suffix is <nil> and unlikely unique")
+	}
+
+	if a.min < 1 {
+		return nil, errors.New("minimum ID must be >= 1")
+	}
+
+	if a.max <= a.min {
+		return nil, fmt.Errorf("maximum ID must be greater than minimum ID: configured max %v, min %v", a.max, a.min)
+	}
+
+	a.idPool = idpool.NewIDPool(a.min, a.max)
+
+	if !a.disableAutostart {
+		a.start()
+	}
+
+	return a, nil
+}
+
+func (a *Allocator) start() {
+	a.initialListDone = a.mainCache.start()
+	if !a.disableGC {
+		go func() {
+			select {
+			case <-a.initialListDone:
+			case <-time.After(option.Config.AllocatorListTimeout):
+				log.Fatalf("Timeout while waiting for initial allocator state")
+			}
+			a.startLocalKeySync()
+		}()
+	}
+}
+
+// WithBackend sets this allocator to use backend.
It is expected to be used at +// initialization. +func WithBackend(backend Backend) AllocatorOption { + return func(a *Allocator) { + a.backend = backend + } +} + +// WithEvents enables receiving of events. +// +// CAUTION: When using this function. The provided channel must be continuously +// read while NewAllocator() is being called to ensure that the channel does +// not block indefinitely while NewAllocator() emits events on it while +// populating the initial cache. +func WithEvents(events AllocatorEventSendChan) AllocatorOption { + return func(a *Allocator) { a.events = events } +} + +// WithMin sets the minimum identifier to be allocated +func WithMin(id idpool.ID) AllocatorOption { + return func(a *Allocator) { a.min = id } +} + +// WithMax sets the maximum identifier to be allocated +func WithMax(id idpool.ID) AllocatorOption { + return func(a *Allocator) { a.max = id } +} + +// WithPrefixMask sets the prefix used for all ID allocations. If set, the mask +// will be ORed to all selected IDs prior to allocation. It is the +// responsibility of the caller to ensure that the mask is not conflicting with +// min..max. +func WithPrefixMask(mask idpool.ID) AllocatorOption { + return func(a *Allocator) { a.prefixMask = mask } +} + +// WithMasterKeyProtection will watch for delete events on master keys and +// re-created them if local usage suggests that the key is still in use +func WithMasterKeyProtection() AllocatorOption { + return func(a *Allocator) { a.enableMasterKeyProtection = true } +} + +// WithoutGC disables the use of the garbage collector +func WithoutGC() AllocatorOption { + return func(a *Allocator) { a.disableGC = true } +} + +// WithoutAutostart prevents starting the allocator when it is initialized +func WithoutAutostart() AllocatorOption { + return func(a *Allocator) { a.disableAutostart = true } +} + +// GetEvents returns the events channel given to the allocator when +// constructed. +// Note: This channel is not owned by the allocator! +func (a *Allocator) GetEvents() AllocatorEventSendChan { + return a.events +} + +// Delete deletes an allocator and stops the garbage collector +func (a *Allocator) Delete() { + close(a.stopGC) + a.mainCache.stop() +} + +// WaitForInitialSync waits until the initial sync is complete +func (a *Allocator) WaitForInitialSync(ctx context.Context) error { + select { + case <-a.initialListDone: + case <-ctx.Done(): + return fmt.Errorf("identity sync was cancelled: %s", ctx.Err()) + } + + return nil +} + +// RangeFunc is the function called by RangeCache +type RangeFunc func(idpool.ID, AllocatorKey) + +// ForeachCache iterates over the allocator cache and calls RangeFunc on each +// cached entry +func (a *Allocator) ForeachCache(cb RangeFunc) { + a.mainCache.foreach(cb) + + a.remoteCachesMutex.RLock() + for _, rc := range a.remoteCaches { + rc.cache.foreach(cb) + } + a.remoteCachesMutex.RUnlock() +} + +// selectAvailableID selects an available ID. +// Returns a triple of the selected ID ORed with prefixMask, the ID string and +// the originally selected ID. +func (a *Allocator) selectAvailableID() (idpool.ID, string, idpool.ID) { + if id := a.idPool.LeaseAvailableID(); id != idpool.NoID { + unmaskedID := id + id |= a.prefixMask + return id, id.String(), unmaskedID + } + + return 0, "", 0 +} + +// AllocatorKey is the interface to implement in order for a type to be used as +// key for the allocator. The key's data is assumed to be a collection of +// pkg/label.Label, and the functions reflect this somewhat. 
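+//
+// A toy implementation sketch (illustrative only; cilium's real keys wrap
+// label sets), backed by a single string and ignoring metadata:
+//
+//	type strKey string
+//
+//	func (k strKey) String() string               { return string(k) }
+//	func (k strKey) GetKey() string               { return string(k) }
+//	func (k strKey) PutKey(v string) AllocatorKey { return strKey(v) }
+//	func (k strKey) GetAsMap() map[string]string  { return map[string]string{"key": string(k)} }
+//	func (k strKey) PutKeyFromMap(v map[string]string) AllocatorKey {
+//		return strKey(v["key"])
+//	}
+//	func (k strKey) PutValue(key, value any) AllocatorKey { return k }
+//	func (k strKey) Value(key any) any                    { return nil }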
+type AllocatorKey interface { + fmt.Stringer + + // GetKey returns the canonical string representation of the key + GetKey() string + + // PutKey stores the information in v into the key. This is the inverse + // operation to GetKey + PutKey(v string) AllocatorKey + + // GetAsMap returns the key as a collection of "labels" with a key and value. + // This is the inverse operation to PutKeyFromMap. + GetAsMap() map[string]string + + // PutKeyFromMap stores the labels in v into the key to be used later. This + // is the inverse operation to GetAsMap. + PutKeyFromMap(v map[string]string) AllocatorKey + + // PutValue puts metadata inside the global identity for the given 'key' with + // the given 'value'. + PutValue(key any, value any) AllocatorKey + + // Value returns the value stored in the metadata map. + Value(key any) any +} + +func (a *Allocator) encodeKey(key AllocatorKey) string { + return a.backend.Encode(key.GetKey()) +} + +// Return values: +// 1. allocated ID +// 2. whether the ID is newly allocated from kvstore +// 3. whether this is the first owner that holds a reference to the key in +// localkeys store +// 4. error in case of failure +func (a *Allocator) lockedAllocate(ctx context.Context, key AllocatorKey) (idpool.ID, bool, bool, error) { + var firstUse bool + + kvstore.Trace("Allocating key in kvstore", nil, logrus.Fields{fieldKey: key}) + + k := a.encodeKey(key) + lock, err := a.backend.Lock(ctx, key) + if err != nil { + return 0, false, false, err + } + + defer lock.Unlock(context.Background()) + + // fetch first key that matches /value/ while ignoring the + // node suffix + value, err := a.GetIfLocked(ctx, key, lock) + if err != nil { + return 0, false, false, err + } + + kvstore.Trace("kvstore state is: ", nil, logrus.Fields{fieldID: value}) + + a.slaveKeysMutex.Lock() + defer a.slaveKeysMutex.Unlock() + + // We shouldn't assume the fact the master key does not exist in the kvstore + // that localKeys does not have it. The KVStore might have lost all of its + // data but the local agent still holds a reference for the given master key. 
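+	// Two cases follow: either the kvstore has no ID for this key yet
+	// (value == 0), in which case a master key still referenced locally is
+	// re-created; or an ID already exists globally, and this node merely
+	// takes a reference to it and verifies its local slave key.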
+ if value == 0 { + value = a.localKeys.lookupKey(k) + if value != 0 { + // re-create master key + if err := a.backend.UpdateKeyIfLocked(ctx, value, key, true, lock); err != nil { + return 0, false, false, fmt.Errorf("unable to re-create missing master key '%s': %s while allocating ID: %s", key, value, err) + } + } + } else { + _, firstUse, err = a.localKeys.allocate(k, key, value) + if err != nil { + return 0, false, false, fmt.Errorf("unable to reserve local key '%s': %s", k, err) + } + + if firstUse { + log.WithField(fieldKey, k).Debug("Reserved new local key") + } else { + log.WithField(fieldKey, k).Debug("Reusing existing local key") + } + } + + if value != 0 { + log.WithField(fieldKey, k).Info("Reusing existing global key") + + if err = a.backend.AcquireReference(ctx, value, key, lock); err != nil { + a.localKeys.release(k) + return 0, false, false, fmt.Errorf("unable to create secondary key '%s': %s", k, err) + } + + // mark the key as verified in the local cache + if err := a.localKeys.verify(k); err != nil { + log.WithError(err).Error("BUG: Unable to verify local key") + } + + return value, false, firstUse, nil + } + + log.WithField(fieldKey, k).Debug("Allocating new master ID") + id, strID, unmaskedID := a.selectAvailableID() + if id == 0 { + return 0, false, false, fmt.Errorf("no more available IDs in configured space") + } + + kvstore.Trace("Selected available key ID", nil, logrus.Fields{fieldID: id}) + + releaseKeyAndID := func() { + a.localKeys.release(k) + a.idPool.Release(unmaskedID) // This returns this ID to be re-used for other keys + } + + oldID, firstUse, err := a.localKeys.allocate(k, key, id) + if err != nil { + a.idPool.Release(unmaskedID) + return 0, false, false, fmt.Errorf("unable to reserve local key '%s': %s", k, err) + } + + // Another local writer beat us to allocating an ID for the same key, + // start over + if id != oldID { + releaseKeyAndID() + return 0, false, false, fmt.Errorf("another writer has allocated key %s", k) + } + + // Check that this key has not been allocated in the cluster during our + // operation here + value, err = a.GetNoCache(ctx, key) + if err != nil { + releaseKeyAndID() + return 0, false, false, err + } + if value != 0 { + releaseKeyAndID() + return 0, false, false, fmt.Errorf("Found master key after proceeding with new allocation for %s", k) + } + + // Assigned to 'key' from 'key2' since in case of an error, we don't replace + // the original 'key' variable with 'nil'. + key2 := key + key, err = a.backend.AllocateIDIfLocked(ctx, id, key2, lock) + if err != nil { + // Creation failed. Another agent most likely beat us to allocting this + // ID, retry. + releaseKeyAndID() + return 0, false, false, fmt.Errorf("unable to allocate ID %s for key %s: %s", strID, key2, err) + } + + // Notify pool that leased ID is now in-use. + a.idPool.Use(unmaskedID) + + if err = a.backend.AcquireReference(ctx, id, key, lock); err != nil { + // We will leak the master key here as the key has already been + // exposed and may be in use by other nodes. The garbage + // collector will release it again. + releaseKeyAndID() + return 0, false, false, fmt.Errorf("secondary key creation failed '%s': %s", k, err) + } + + // mark the key as verified in the local cache + if err := a.localKeys.verify(k); err != nil { + log.WithError(err).Error("BUG: Unable to verify local key") + } + + log.WithField(fieldKey, k).Info("Allocated new global key") + + return id, true, firstUse, nil +} + +// Allocate will retrieve the ID for the provided key. 
If no ID has been +// allocated for this key yet, a key will be allocated. If allocation fails, +// most likely due to a parallel allocation of the same ID by another user, +// allocation is re-attempted for maxAllocAttempts times. +// +// Return values: +// 1. allocated ID +// 2. whether the ID is newly allocated from kvstore +// 3. whether this is the first owner that holds a reference to the key in +// localkeys store +// 4. error in case of failure +func (a *Allocator) Allocate(ctx context.Context, key AllocatorKey) (idpool.ID, bool, bool, error) { + var ( + err error + value idpool.ID + isNew bool + firstUse bool + k = a.encodeKey(key) + ) + + log.WithField(fieldKey, key).Debug("Allocating key") + + select { + case <-a.initialListDone: + case <-ctx.Done(): + return 0, false, false, fmt.Errorf("allocation was cancelled while waiting for initial key list to be received: %s", ctx.Err()) + } + + kvstore.Trace("Allocating from kvstore", nil, logrus.Fields{fieldKey: key}) + + // make a copy of the template and customize it + boff := a.backoffTemplate + boff.Name = key.String() + + for attempt := 0; attempt < maxAllocAttempts; attempt++ { + // Check our list of local keys already in use and increment the + // refcnt. The returned key must be released afterwards. No kvstore + // operation was performed for this allocation. + // We also do this on every loop as a different Allocate call might have + // allocated the key while we are attempting to allocate in this + // execution thread. It does not hurt to check if localKeys contains a + // reference for the key that we are attempting to allocate. + if val := a.localKeys.use(k); val != idpool.NoID { + kvstore.Trace("Reusing local id", nil, logrus.Fields{fieldID: val, fieldKey: key}) + a.mainCache.insert(key, val) + return val, false, false, nil + } + + // FIXME: Add non-locking variant + value, isNew, firstUse, err = a.lockedAllocate(ctx, key) + if err == nil { + a.mainCache.insert(key, value) + log.WithField(fieldKey, key).WithField(fieldID, value).Debug("Allocated key") + return value, isNew, firstUse, nil + } + + scopedLog := log.WithFields(logrus.Fields{ + fieldKey: key, + logfields.Attempt: attempt, + }) + + select { + case <-ctx.Done(): + scopedLog.WithError(ctx.Err()).Warning("Ongoing key allocation has been cancelled") + return 0, false, false, fmt.Errorf("key allocation cancelled: %s", ctx.Err()) + default: + scopedLog.WithError(err).Warning("Key allocation attempt failed") + } + + kvstore.Trace("Allocation attempt failed", err, logrus.Fields{fieldKey: key, logfields.Attempt: attempt}) + + if waitErr := boff.Wait(ctx); waitErr != nil { + return 0, false, false, waitErr + } + } + + return 0, false, false, err +} + +// GetIfLocked returns the ID which is allocated to a key. Returns an ID of NoID if no ID +// has been allocated to this key yet if the client is still holding the given +// lock. +func (a *Allocator) GetIfLocked(ctx context.Context, key AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error) { + if id := a.mainCache.get(a.encodeKey(key)); id != idpool.NoID { + return id, nil + } + + return a.backend.GetIfLocked(ctx, key, lock) +} + +// Get returns the ID which is allocated to a key. Returns an ID of NoID if no ID +// has been allocated to this key yet. 
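+//
+// For example (a sketch):
+//
+//	if id, err := a.Get(ctx, key); err == nil && id != idpool.NoID {
+//		// an identity already exists for this key
+//	}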
+func (a *Allocator) Get(ctx context.Context, key AllocatorKey) (idpool.ID, error) { + if id := a.mainCache.get(a.encodeKey(key)); id != idpool.NoID { + return id, nil + } + + return a.GetNoCache(ctx, key) +} + +// GetNoCache returns the ID which is allocated to a key in the kvstore, +// bypassing the local copy of allocated keys. +func (a *Allocator) GetNoCache(ctx context.Context, key AllocatorKey) (idpool.ID, error) { + return a.backend.Get(ctx, key) +} + +// GetByID returns the key associated with an ID. Returns nil if no key is +// associated with the ID. +func (a *Allocator) GetByID(ctx context.Context, id idpool.ID) (AllocatorKey, error) { + if key := a.mainCache.getByID(id); key != nil { + return key, nil + } + + return a.backend.GetByID(ctx, id) +} + +// GetIncludeRemoteCaches returns the ID which is allocated to a key. Includes the +// caches of watched remote kvstores in the query. Returns an ID of NoID if no +// ID has been allocated in any remote kvstore to this key yet. +func (a *Allocator) GetIncludeRemoteCaches(ctx context.Context, key AllocatorKey) (idpool.ID, error) { + encoded := a.encodeKey(key) + + // check main cache first + if id := a.mainCache.get(encoded); id != idpool.NoID { + return id, nil + } + + // check remote caches + a.remoteCachesMutex.RLock() + for _, rc := range a.remoteCaches { + if id := rc.cache.get(encoded); id != idpool.NoID { + a.remoteCachesMutex.RUnlock() + return id, nil + } + } + a.remoteCachesMutex.RUnlock() + + // check main backend + if id, err := a.backend.Get(ctx, key); id != idpool.NoID || err != nil { + return id, err + } + + // we skip checking remote backends explicitly here, to avoid + // accidentally overloading them in case of lookups for invalid identities + + return idpool.NoID, nil +} + +// GetByIDIncludeRemoteCaches returns the key associated with an ID. Includes +// the caches of watched remote kvstores in the query. +// Returns nil if no key is associated with the ID. +func (a *Allocator) GetByIDIncludeRemoteCaches(ctx context.Context, id idpool.ID) (AllocatorKey, error) { + // check main cache first + if key := a.mainCache.getByID(id); key != nil { + return key, nil + } + + // check remote caches + a.remoteCachesMutex.RLock() + for _, rc := range a.remoteCaches { + if key := rc.cache.getByID(id); key != nil { + a.remoteCachesMutex.RUnlock() + return key, nil + } + } + a.remoteCachesMutex.RUnlock() + + // check main backend + if key, err := a.backend.GetByID(ctx, id); key != nil || err != nil { + return key, err + } + + // we skip checking remote backends explicitly here, to avoid + // accidentally overloading them in case of lookups for invalid identities + + return nil, nil +} + +// Release releases the use of an ID associated with the provided key. After +// the last user has released the ID, the key is removed in the KVstore and +// the returned lastUse value is true. 
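+//
+// Paired with Allocate, a typical lifecycle looks like (a sketch, with error
+// handling elided):
+//
+//	id, isNew, firstUse, err := a.Allocate(ctx, key)
+//	// ... use id ...
+//	lastUse, err := a.Release(ctx, key)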
+func (a *Allocator) Release(ctx context.Context, key AllocatorKey) (lastUse bool, err error) { + log.WithField(fieldKey, key).Info("Releasing key") + + select { + case <-a.initialListDone: + case <-ctx.Done(): + return false, fmt.Errorf("release was cancelled while waiting for initial key list to be received: %s", ctx.Err()) + } + + k := a.encodeKey(key) + + a.slaveKeysMutex.Lock() + defer a.slaveKeysMutex.Unlock() + + // release the key locally, if it was the last use, remove the node + // specific value key to remove the global reference mark + var id idpool.ID + lastUse, id, err = a.localKeys.release(k) + if err != nil { + return lastUse, err + } + if lastUse { + // Since in CRD mode we don't have a way to map which identity is being + // used by a node, we need to also pass the ID to the release function. + // This allows the CRD store to find the right identity by its ID and + // remove the node reference on that identity. + a.backend.Release(ctx, id, key) + } + + return lastUse, err +} + +// RunGC scans the kvstore for unused master keys and removes them +func (a *Allocator) RunGC(rateLimit *rate.Limiter, staleKeysPrevRound map[string]uint64) (map[string]uint64, *GCStats, error) { + return a.backend.RunGC(context.TODO(), rateLimit, staleKeysPrevRound, a.min, a.max) +} + +// RunLocksGC scans the kvstore for stale locks and removes them +func (a *Allocator) RunLocksGC(ctx context.Context, staleLocksPrevRound map[string]kvstore.Value) (map[string]kvstore.Value, error) { + return a.backend.RunLocksGC(ctx, staleLocksPrevRound) +} + +// DeleteAllKeys will delete all keys. It is expected to be used in tests. +func (a *Allocator) DeleteAllKeys() { + a.backend.DeleteAllKeys(context.TODO()) +} + +// syncLocalKeys checks the kvstore and verifies that a master key exists for +// all locally used allocations. This will restore master keys if deleted for +// some reason. +func (a *Allocator) syncLocalKeys() error { + // Create a local copy of all local allocations to not require to hold + // any locks while performing kvstore operations. Local use can + // disappear while we perform the sync but that is fine as worst case, + // a master key is created for a slave key that no longer exists. The + // garbage collector will remove it again. + ids := a.localKeys.getVerifiedIDs() + + for id, value := range ids { + if err := a.backend.UpdateKey(context.TODO(), id, value, false); err != nil { + log.WithError(err).WithFields(logrus.Fields{ + fieldKey: value, + fieldID: id, + }).Warning("Unable to sync key") + } + } + + return nil +} + +func (a *Allocator) startLocalKeySync() { + go func(a *Allocator) { + kvTimer, kvTimerDone := inctimer.New() + defer kvTimerDone() + for { + if err := a.syncLocalKeys(); err != nil { + log.WithError(err).Warning("Unable to run local key sync routine") + } + + select { + case <-a.stopGC: + log.Debug("Stopped master key sync routine") + return + case <-kvTimer.After(option.Config.KVstorePeriodicSync): + } + } + }(a) +} + +// AllocatorEventChan is a channel to receive allocator events on +type AllocatorEventChan chan AllocatorEvent + +// Send- and receive-only versions of the above. 
+type AllocatorEventRecvChan = <-chan AllocatorEvent +type AllocatorEventSendChan = chan<- AllocatorEvent + +// AllocatorEvent is an event sent over AllocatorEventChan +type AllocatorEvent struct { + // Typ is the type of event (create / modify / delete) + Typ kvstore.EventType + + // ID is the allocated ID + ID idpool.ID + + // Key is the key associated with the ID + Key AllocatorKey +} + +// RemoteCache represents the cache content of an additional kvstore managing +// identities. The contents are not directly accessible but will be merged into +// the ForeachCache() function. +type RemoteCache struct { + name string + + allocator *Allocator + cache *cache + + watchFunc func(ctx context.Context, remote *RemoteCache, onSync func(context.Context)) +} + +func (a *Allocator) NewRemoteCache(remoteName string, remoteAlloc *Allocator) *RemoteCache { + return &RemoteCache{ + name: remoteName, + allocator: remoteAlloc, + cache: &remoteAlloc.mainCache, + + watchFunc: a.WatchRemoteKVStore, + } +} + +// WatchRemoteKVStore starts watching an allocator base prefix the kvstore +// represents by the provided backend. A local cache of all identities of that +// kvstore will be maintained in the RemoteCache structure returned and will +// start being reported in the identities returned by the ForeachCache() +// function. RemoteName should be unique per logical "remote". +func (a *Allocator) WatchRemoteKVStore(ctx context.Context, rc *RemoteCache, onSync func(context.Context)) { + scopedLog := log.WithField(logfields.ClusterName, rc.name) + scopedLog.Info("Starting remote kvstore watcher") + + rc.allocator.start() + + select { + case <-ctx.Done(): + scopedLog.Debug("Context canceled before remote kvstore watcher synchronization completed: stale identities will now be drained") + rc.close() + + a.remoteCachesMutex.RLock() + old := a.remoteCaches[rc.name] + a.remoteCachesMutex.RUnlock() + + if old != nil { + old.cache.mutex.RLock() + defer old.cache.mutex.RUnlock() + } + + // Drain all entries that might have been received until now, and that + // are not present in the current cache (if any). This ensures we do not + // leak any stale identity, and at the same time we do not invalidate the + // current state. + rc.cache.drainIf(func(id idpool.ID) bool { + if old == nil { + return true + } + + _, ok := old.cache.nextCache[id] + return !ok + }) + return + + case <-rc.cache.listDone: + scopedLog.Info("Remote kvstore watcher successfully synchronized and registered") + } + + a.remoteCachesMutex.Lock() + old := a.remoteCaches[rc.name] + a.remoteCaches[rc.name] = rc + a.remoteCachesMutex.Unlock() + + if old != nil { + // In case of reconnection, let's emit a deletion event for all stale identities + // that are no longer present in the kvstore. We take the lock of the new cache + // to ensure that we observe a stable state during this process (i.e., no keys + // are added/removed in the meanwhile). + scopedLog.Debug("Another kvstore watcher was already registered: deleting stale identities") + rc.cache.mutex.RLock() + old.cache.drainIf(func(id idpool.ID) bool { + _, ok := rc.cache.nextCache[id] + return !ok + }) + rc.cache.mutex.RUnlock() + } + + // Execute the on-sync callback handler. + onSync(ctx) + + <-ctx.Done() + rc.close() + scopedLog.Info("Stopped remote kvstore watcher") +} + +// RemoveRemoteKVStore removes any reference to a remote allocator / kvstore, emitting +// a deletion event for all previously known identities. 
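+//
+// A minimal sketch of the intended pairing with WatchRemoteKVStore
+// (hypothetical caller code; names are illustrative):
+//
+//	rc := alloc.NewRemoteCache("cluster-2", remoteAlloc)
+//	go rc.Watch(ctx, onSync) // blocks until ctx is cancelled
+//	...
+//	alloc.RemoveRemoteKVStore("cluster-2") // drains and emits deletions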
+func (a *Allocator) RemoveRemoteKVStore(remoteName string) { + a.remoteCachesMutex.Lock() + old := a.remoteCaches[remoteName] + delete(a.remoteCaches, remoteName) + a.remoteCachesMutex.Unlock() + + if old != nil { + old.cache.drain() + log.WithField(logfields.ClusterName, remoteName).Info("Remote kvstore watcher unregistered") + } +} + +// Watch starts watching the remote kvstore and synchronize the identities in +// the local cache. It blocks until the context is closed. +func (rc *RemoteCache) Watch(ctx context.Context, onSync func(context.Context)) { + rc.watchFunc(ctx, rc, onSync) +} + +// NumEntries returns the number of entries in the remote cache +func (rc *RemoteCache) NumEntries() int { + if rc == nil { + return 0 + } + + return rc.cache.numEntries() +} + +// Synced returns whether the initial list of entries has been retrieved from +// the kvstore, and new events are currently being watched. +func (rc *RemoteCache) Synced() bool { + if rc == nil { + return false + } + + select { + case <-rc.cache.stopChan: + return false + default: + select { + case <-rc.cache.listDone: + return true + default: + return false + } + } +} + +// close stops watching for identities in the kvstore associated with the +// remote cache. +func (rc *RemoteCache) close() { + rc.cache.allocator.Delete() +} + +// Observe the identity changes. Conforms to stream.Observable. +// Replays the current state of the cache when subscribing. +func (a *Allocator) Observe(ctx context.Context, next func(AllocatorChange), complete func(error)) { + a.mainCache.Observe(ctx, next, complete) +} diff --git a/vendor/github.com/cilium/cilium/pkg/allocator/cache.go b/vendor/github.com/cilium/cilium/pkg/allocator/cache.go new file mode 100644 index 0000000000..774aca1eeb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/allocator/cache.go @@ -0,0 +1,381 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package allocator + +import ( + "context" + "sync" + + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/controller" + "github.com/cilium/cilium/pkg/idpool" + "github.com/cilium/cilium/pkg/kvstore" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/stream" + "github.com/cilium/cilium/pkg/time" +) + +// backendOpTimeout is the time allowed for operations sent to backends in +// response to events such as create/modify/delete. +const backendOpTimeout = 10 * time.Second + +// idMap provides mapping from ID to an AllocatorKey +type idMap map[idpool.ID]AllocatorKey + +// keyMap provides mapping from AllocatorKey to ID +type keyMap map[string]idpool.ID + +type cache struct { + controllers *controller.Manager + + allocator *Allocator + + stopChan chan struct{} + + // mutex protects all cache data structures + mutex lock.RWMutex + + // cache is a local cache of all IDs allocated in the kvstore. It is + // being maintained by watching for kvstore events and can thus lag + // behind. + cache idMap + + // keyCache shadows cache and allows access by key + keyCache keyMap + + // nextCache is the cache is constantly being filled by startWatch(), + // when startWatch has successfully performed the initial fill using + // ListPrefix, the cache above will be pointed to nextCache. If the + // startWatch() fails to perform the initial list, then the cache is + // never pointed to nextCache. This guarantees that a valid cache is + // kept at all times. 
+	nextCache idMap
+
+	// nextKeyCache follows the same logic as nextCache but for keyCache
+	nextKeyCache keyMap
+
+	listDone waitChan
+
+	// stopWatchWg is a wait group that is incremented when a watcher is
+	// started and marked done when the watcher has exited
+	stopWatchWg sync.WaitGroup
+
+	changeSrc         stream.Observable[AllocatorChange]
+	emitChange        func(AllocatorChange)
+	completeChangeSrc func(error)
+}
+
+func newCache(a *Allocator) (c cache) {
+	c = cache{
+		allocator:   a,
+		cache:       idMap{},
+		keyCache:    keyMap{},
+		stopChan:    make(chan struct{}),
+		controllers: controller.NewManager(),
+	}
+	c.changeSrc, c.emitChange, c.completeChangeSrc = stream.Multicast[AllocatorChange]()
+	return
+}
+
+type waitChan chan struct{}
+
+// CacheMutations are the operations given to a Backend's ListAndWatch command.
+// They are called on changes to identities.
+type CacheMutations interface {
+	// OnListDone is called when the initial full-sync is complete.
+	OnListDone()
+
+	// OnAdd is called when a new key->ID appears.
+	OnAdd(id idpool.ID, key AllocatorKey)
+
+	// OnModify is called when a key->ID mapping is modified. This may happen
+	// when leases are updated, and does not mean the actual mapping had changed.
+	OnModify(id idpool.ID, key AllocatorKey)
+
+	// OnDelete is called when a key->ID mapping is removed. This may trigger
+	// master-key protection, if enabled, where the local allocator will
+	// recreate the key->ID association because the local node is still
+	// using it.
+	OnDelete(id idpool.ID, key AllocatorKey)
+}
+
+func (c *cache) sendEvent(typ kvstore.EventType, id idpool.ID, key AllocatorKey) {
+	if events := c.allocator.events; events != nil {
+		events <- AllocatorEvent{Typ: typ, ID: id, Key: key}
+	}
+}
+
+func (c *cache) OnListDone() {
+	c.mutex.Lock()
+	// nextCache is valid, point the live cache to it
+	c.cache = c.nextCache
+	c.keyCache = c.nextKeyCache
+	c.mutex.Unlock()
+
+	log.Debug("Initial list of identities received")
+
+	// report that the list operation has
+	// been completed and the allocator is
+	// ready to use
+	close(c.listDone)
+}
+
+func (c *cache) OnAdd(id idpool.ID, key AllocatorKey) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	c.nextCache[id] = key
+	if key != nil {
+		c.nextKeyCache[c.allocator.encodeKey(key)] = id
+	}
+	c.allocator.idPool.Remove(id)
+
+	c.emitChange(AllocatorChange{Kind: AllocatorChangeUpsert, ID: id, Key: key})
+
+	c.sendEvent(kvstore.EventTypeCreate, id, key)
+}
+
+func (c *cache) OnModify(id idpool.ID, key AllocatorKey) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	if k, ok := c.nextCache[id]; ok {
+		delete(c.nextKeyCache, c.allocator.encodeKey(k))
+	}
+
+	c.nextCache[id] = key
+	if key != nil {
+		c.nextKeyCache[c.allocator.encodeKey(key)] = id
+	}
+
+	c.emitChange(AllocatorChange{Kind: AllocatorChangeUpsert, ID: id, Key: key})
+
+	c.sendEvent(kvstore.EventTypeModify, id, key)
+}
+
+func (c *cache) OnDelete(id idpool.ID, key AllocatorKey) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	c.onDeleteLocked(id, key, true)
+}
+
+const syncIdentityControllerGroup = "sync-identity"
+
+func syncControllerName(id idpool.ID) string {
+	return syncIdentityControllerGroup + "-" + id.String()
+}
+
+// no max interval by default, exposed as a variable for testing.
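+// For example, a test could bound the retry interval used by the
+// master-key-protection controller below (illustrative only):
+//
+//	masterKeyRecreateMaxInterval = 100 * time.Millisecond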
+var masterKeyRecreateMaxInterval = time.Duration(0)
+
+var syncIdentityGroup = controller.NewGroup(syncIdentityControllerGroup)
+
+// onDeleteLocked must be called while holding c.mutex for writing
+func (c *cache) onDeleteLocked(id idpool.ID, key AllocatorKey, recreateMissingLocalKeys bool) {
+	a := c.allocator
+	if a.enableMasterKeyProtection && recreateMissingLocalKeys {
+		if value := a.localKeys.lookupID(id); value != nil {
+			c.controllers.UpdateController(syncControllerName(id), controller.ControllerParams{
+				Context:          context.Background(),
+				MaxRetryInterval: masterKeyRecreateMaxInterval,
+				Group:            syncIdentityGroup,
+				DoFunc: func(ctx context.Context) error {
+					c.mutex.Lock()
+					defer c.mutex.Unlock()
+					// For each attempt, check if this ciliumidentity is still a candidate for recreation.
+					// It's possible that since the last iteration this agent has legitimately deleted
+					// the key, in which case we can stop trying to recreate it.
+					if value := c.allocator.localKeys.lookupID(id); value == nil {
+						return nil
+					}
+
+					ctx, cancel := context.WithTimeout(ctx, backendOpTimeout)
+					defer cancel()
+
+					// Each iteration will attempt to grab the key reference; if that succeeds,
+					// then this completes (i.e. the key exists).
+					// Otherwise we will attempt to create the key; this process repeats until
+					// the key is created.
+					if err := a.backend.UpdateKey(ctx, id, value, true); err != nil {
+						log.WithField("id", id).WithError(err).Error("OnDelete MasterKeyProtection update for key")
+						return err
+					}
+					log.WithField("id", id).Info("OnDelete MasterKeyProtection update succeeded")
+					return nil
+				},
+			})
+
+			return
+		}
+	}
+
+	if k, ok := c.nextCache[id]; ok && k != nil {
+		delete(c.nextKeyCache, c.allocator.encodeKey(k))
+	}
+
+	delete(c.nextCache, id)
+	a.idPool.Insert(id)
+
+	c.emitChange(AllocatorChange{Kind: AllocatorChangeDelete, ID: id, Key: key})
+
+	c.sendEvent(kvstore.EventTypeDelete, id, key)
+}
+
+// start requests a LIST operation from the kvstore and starts watching the
+// prefix in a goroutine.
+func (c *cache) start() waitChan {
+	c.listDone = make(waitChan)
+
+	c.mutex.Lock()
+
+	// start with a fresh nextCache
+	c.nextCache = idMap{}
+	c.nextKeyCache = keyMap{}
+	c.mutex.Unlock()
+
+	c.stopWatchWg.Add(1)
+
+	go func() {
+		c.allocator.backend.ListAndWatch(context.TODO(), c, c.stopChan)
+		c.stopWatchWg.Done()
+	}()
+
+	return c.listDone
+}
+
+func (c *cache) stop() {
+	close(c.stopChan)
+	c.stopWatchWg.Wait()
+	// Drain/stop any remaining sync identity controllers.
+	// Backend watch is now stopped; any running controllers attempting to
+	// sync identities will complete and stop (possibly in an unresolved state).
+	c.controllers.RemoveAllAndWait()
+	c.completeChangeSrc(nil)
+}
+
+// drain emits a deletion event for all known IDs. It must be called after the
+// cache has been stopped, to ensure that no new events can be received afterwards.
+func (c *cache) drain() {
+	// Make sure we wait until the watch loop has been properly stopped.
+	c.stopWatchWg.Wait()
+
+	c.mutex.Lock()
+	for id, key := range c.nextCache {
+		c.onDeleteLocked(id, key, false)
+	}
+	c.mutex.Unlock()
+}
+
+// drainIf emits a deletion event for all known IDs that are stale according to
+// the isStale function. It must be called after the cache has been stopped, to
+// ensure that no new events can be received afterwards.
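+//
+// A sketch of a typical predicate, mirroring the reconnect handling in
+// WatchRemoteKVStore above: treat an ID as stale when it is absent from a
+// newer cache snapshot (names illustrative):
+//
+//	old.cache.drainIf(func(id idpool.ID) bool {
+//		_, ok := rc.cache.nextCache[id]
+//		return !ok
+//	})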
+func (c *cache) drainIf(isStale func(id idpool.ID) bool) { + // Make sure we wait until the watch loop has been properly stopped, otherwise + // new IDs might be added afterwards we complete the draining process. + c.stopWatchWg.Wait() + + c.mutex.Lock() + for id, key := range c.nextCache { + if isStale(id) { + c.onDeleteLocked(id, key, false) + log.WithFields(logrus.Fields{fieldID: id, fieldKey: key}). + Debug("Stale identity deleted") + } + } + c.mutex.Unlock() +} + +func (c *cache) get(key string) idpool.ID { + c.mutex.RLock() + if id, ok := c.keyCache[key]; ok { + c.mutex.RUnlock() + return id + } + c.mutex.RUnlock() + + return idpool.NoID +} + +func (c *cache) getByID(id idpool.ID) AllocatorKey { + c.mutex.RLock() + if v, ok := c.cache[id]; ok { + c.mutex.RUnlock() + return v + } + c.mutex.RUnlock() + + return nil +} + +func (c *cache) foreach(cb RangeFunc) { + c.mutex.RLock() + for k, v := range c.cache { + cb(k, v) + } + c.mutex.RUnlock() +} + +func (c *cache) insert(key AllocatorKey, val idpool.ID) { + c.mutex.Lock() + c.nextCache[val] = key + c.nextKeyCache[c.allocator.encodeKey(key)] = val + c.mutex.Unlock() +} + +func (c *cache) numEntries() int { + c.mutex.RLock() + defer c.mutex.RUnlock() + return len(c.nextCache) +} + +type AllocatorChangeKind string + +const ( + AllocatorChangeSync AllocatorChangeKind = "sync" + AllocatorChangeUpsert AllocatorChangeKind = "upsert" + AllocatorChangeDelete AllocatorChangeKind = "delete" +) + +type AllocatorChange struct { + Kind AllocatorChangeKind + ID idpool.ID + Key AllocatorKey +} + +// Observe the allocator changes. Conforms to stream.Observable. +// Replays the current state of the cache when subscribing. +func (c *cache) Observe(ctx context.Context, next func(AllocatorChange), complete func(error)) { + // This short-lived go routine serves the purpose of replaying the current state of the cache before starting + // to observe the actual source changeSrc. ChangeSrc is backed by a stream.FuncObservable, that will start its own + // go routine. Therefore, the current go routine will stop and free the lock on the mutex after the registration. + go func() { + // Wait until initial listing has completed before + // replaying the state. + select { + case <-c.listDone: + case <-ctx.Done(): + complete(ctx.Err()) + return + } + + c.mutex.RLock() + defer c.mutex.RUnlock() + + for id, key := range c.cache { + next(AllocatorChange{Kind: AllocatorChangeUpsert, ID: id, Key: key}) + } + + // Emit a sync event to inform the subscriber that it has received a consistent + // initial state. + next(AllocatorChange{Kind: AllocatorChangeSync}) + + // And subscribe to new events. Since we held the read-lock there won't be any + // missed or duplicate events. 
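+		//
+		// A subscriber thus sees a replay of upserts, a single
+		// AllocatorChangeSync marker, and then live changes. Hypothetical
+		// subscriber sketch:
+		//
+		//	c.Observe(ctx, func(change AllocatorChange) {
+		//		if change.Kind == AllocatorChangeSync {
+		//			// consistent initial state has been delivered
+		//		}
+		//	}, func(err error) { /* stream terminated */ })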
+ c.changeSrc.Observe(ctx, next, complete) + }() + +} diff --git a/vendor/github.com/cilium/cilium/pkg/allocator/doc.go b/vendor/github.com/cilium/cilium/pkg/allocator/doc.go new file mode 100644 index 0000000000..26c9a9aad1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/allocator/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package allocator provides a kvstore based ID allocator +package allocator diff --git a/vendor/github.com/cilium/cilium/pkg/allocator/localkeys.go b/vendor/github.com/cilium/cilium/pkg/allocator/localkeys.go new file mode 100644 index 0000000000..48820d7366 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/allocator/localkeys.go @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package allocator + +import ( + "fmt" + + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/idpool" + "github.com/cilium/cilium/pkg/kvstore" + "github.com/cilium/cilium/pkg/lock" +) + +type localKey struct { + val idpool.ID + key AllocatorKey + refcnt uint64 + + // verified is true when the key has been synced with the kvstore + verified bool +} + +// localKeys is a map of keys in use locally. Keys can be used multiple times. +// A refcnt is managed to know when a key is no longer in use +type localKeys struct { + lock.RWMutex + keys map[string]*localKey + ids map[idpool.ID]*localKey +} + +func newLocalKeys() *localKeys { + return &localKeys{ + keys: map[string]*localKey{}, + ids: map[idpool.ID]*localKey{}, + } +} + +// allocate creates an entry for key in localKeys if needed and increments the +// refcnt. The value associated with the key must match the local cache or an +// error is returned +func (lk *localKeys) allocate(keyString string, key AllocatorKey, val idpool.ID) (idpool.ID, bool, error) { + lk.Lock() + defer lk.Unlock() + + var firstUse bool + + if k, ok := lk.keys[keyString]; ok { + if val != k.val { + return idpool.NoID, firstUse, fmt.Errorf("local key already allocated with different value (%s != %s)", val, k.val) + } + + k.refcnt++ + kvstore.Trace("Incremented local key refcnt", nil, logrus.Fields{fieldKey: keyString, fieldID: val, fieldRefCnt: k.refcnt}) + return k.val, firstUse, nil + } + + firstUse = true + k := &localKey{key: key, val: val, refcnt: 1} + lk.keys[keyString] = k + lk.ids[val] = k + kvstore.Trace("New local key", nil, logrus.Fields{fieldKey: keyString, fieldID: val, fieldRefCnt: 1}) + return val, firstUse, nil +} + +func (lk *localKeys) verify(key string) error { + lk.Lock() + defer lk.Unlock() + + if k, ok := lk.keys[key]; ok { + k.verified = true + kvstore.Trace("Local key verified", nil, logrus.Fields{fieldKey: key}) + return nil + } + + return fmt.Errorf("key %s not found", key) +} + +// lookupKey returns the idpool.ID of the key is present in the map of keys. 
+// if it isn't present, returns idpool.NoID +func (lk *localKeys) lookupKey(key string) idpool.ID { + lk.RLock() + defer lk.RUnlock() + + if k, ok := lk.keys[key]; ok { + return k.val + } + + return idpool.NoID +} + +// lookupID returns the key for a given ID or an empty string +func (lk *localKeys) lookupID(id idpool.ID) AllocatorKey { + lk.RLock() + defer lk.RUnlock() + + if k, ok := lk.ids[id]; ok { + return k.key + } + + return nil +} + +// use increments the refcnt of the key and returns its value +func (lk *localKeys) use(key string) idpool.ID { + lk.Lock() + defer lk.Unlock() + + if k, ok := lk.keys[key]; ok { + // unverified keys behave as if they do not exist + if !k.verified { + return idpool.NoID + } + + k.refcnt++ + kvstore.Trace("Incremented local key refcnt", nil, logrus.Fields{fieldKey: key, fieldID: k.val, fieldRefCnt: k.refcnt}) + return k.val + } + + return idpool.NoID +} + +// release releases the refcnt of a key. It returns the ID associated with the +// given key. When the last reference was released, the key is deleted and the +// returned lastUse value is true. +func (lk *localKeys) release(key string) (lastUse bool, id idpool.ID, err error) { + lk.Lock() + defer lk.Unlock() + if k, ok := lk.keys[key]; ok { + k.refcnt-- + kvstore.Trace("Decremented local key refcnt", nil, logrus.Fields{fieldKey: key, fieldID: k.val, fieldRefCnt: k.refcnt}) + if k.refcnt == 0 { + delete(lk.keys, key) + delete(lk.ids, k.val) + return true, k.val, nil + } + + return false, k.val, nil + } + + return false, idpool.NoID, fmt.Errorf("unable to find key in local cache") +} + +func (lk *localKeys) getVerifiedIDs() map[idpool.ID]AllocatorKey { + ids := map[idpool.ID]AllocatorKey{} + lk.RLock() + for id, localKey := range lk.ids { + if localKey.verified { + ids[id] = localKey.key + } + } + lk.RUnlock() + + return ids +} diff --git a/vendor/github.com/cilium/cilium/pkg/allocator/logfields.go b/vendor/github.com/cilium/cilium/pkg/allocator/logfields.go new file mode 100644 index 0000000000..59df554058 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/allocator/logfields.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package allocator + +const ( + fieldID = "id" + fieldKey = "key" + fieldRefCnt = "refcnt" +) diff --git a/vendor/github.com/cilium/cilium/pkg/annotation/k8s.go b/vendor/github.com/cilium/cilium/pkg/annotation/k8s.go new file mode 100644 index 0000000000..87242d8a93 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/annotation/k8s.go @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package annotation + +import ( + "regexp" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // Prefix is the common prefix for all annotations + Prefix = "io.cilium" + + // ConfigPrefix is the common prefix for configuration related annotations. + ConfigPrefix = "config.cilium.io" + + // IngressPrefix is the common prefix for ingress related annotations. + IngressPrefix = "ingress.cilium.io" + + // NetworkPrefix is the common prefix for network related annotations. + NetworkPrefix = "network.cilium.io" + + // PolicyPrefix is the common prefix for policy related annotations. + PolicyPrefix = "policy.cilium.io" + + // ServicePrefix is the common prefix for service related annotations. + ServicePrefix = "service.cilium.io" + + // IPAMPrefix is the common prefix for IPAM related annotations. 
+ IPAMPrefix = "ipam.cilium.io" + + // PolicyName / PolicyNameAlias is an optional annotation to the NetworkPolicy + // resource which specifies the name of the policy node to which all + // rules should be applied to. + PolicyName = PolicyPrefix + "/name" + PolicyNameAlias = Prefix + ".name" + + // V4CIDRName / V4CIDRNameAlias is the annotation name used to store the IPv4 + // pod CIDR in the node's annotations. + V4CIDRName = NetworkPrefix + "/ipv4-pod-cidr" + V4CIDRNameAlias = Prefix + ".network.ipv4-pod-cidr" + // V6CIDRName / V6CIDRNameAlias is the annotation name used to store the IPv6 + // pod CIDR in the node's annotations. + V6CIDRName = NetworkPrefix + "/ipv6-pod-cidr" + V6CIDRNameAlias = Prefix + ".network.ipv6-pod-cidr" + + // V4HealthName / V4HealthNameAlias is the annotation name used to store the + // IPv4 address of the cilium-health endpoint in the node's annotations. + V4HealthName = NetworkPrefix + "/ipv4-health-ip" + V4HealthNameAlias = Prefix + ".network.ipv4-health-ip" + // V6HealthName / V6HealthNameAlias is the annotation name used to store the + // IPv6 address of the cilium-health endpoint in the node's annotations. + V6HealthName = NetworkPrefix + "/ipv6-health-ip" + V6HealthNameAlias = Prefix + ".network.ipv6-health-ip" + + // V4IngressName / V4IngressNameAlias is the annotation name used to store + // the IPv4 address of the Ingress listener in the node's annotations. + V4IngressName = NetworkPrefix + "/ipv4-Ingress-ip" + V4IngressNameAlias = Prefix + ".network.ipv4-Ingress-ip" + // V6IngressName / V6IngressNameAlias is the annotation name used to store + // the IPv6 address of the Ingress listener in the node's annotations. + V6IngressName = NetworkPrefix + "/ipv6-Ingress-ip" + V6IngressNameAlias = Prefix + ".network.ipv6-Ingress-ip" + + // CiliumHostIP / CiliumHostIPAlias is the annotation name used to store the + // IPv4 address of the cilium host interface in the node's annotations. + CiliumHostIP = NetworkPrefix + "/ipv4-cilium-host" + CiliumHostIPAlias = Prefix + ".network.ipv4-cilium-host" + + // CiliumHostIPv6 / CiliumHostIPv6Alias is the annotation name used to store + // the IPv6 address of the cilium host interface in the node's annotation. + CiliumHostIPv6 = NetworkPrefix + "/ipv6-cilium-host" + CiliumHostIPv6Alias = Prefix + ".network.ipv6-cilium-host" + + // CiliumEncryptionKey / CiliumEncryptionKeyAlias is the annotation name used to + // store the encryption key of the cilium host interface in the node's annotation. + CiliumEncryptionKey = NetworkPrefix + "/encryption-key" + CiliumEncryptionKeyAlias = Prefix + ".network.encryption-key" + + // GlobalService / GlobalServiceAlias if set to true, marks a service to + // become a global service. + GlobalService = ServicePrefix + "/global" + GlobalServiceAlias = Prefix + "/global-service" + + // SharedService / SharedServiceAlias if set to false, prevents a service + // from being shared, the default is true if GlobalService is set, otherwise + // false. Setting the annotation SharedService to false while setting + // GlobalService to true allows to expose remote endpoints without + // sharing local endpoints. + SharedService = ServicePrefix + "/shared" + SharedServiceAlias = Prefix + "/shared-service" + + // ServiceAffinity / ServiceAffinityAlias annotations determines the + // preferred endpoint destination. 
+	// Allowed values:
+	// - local
+	//	preferred endpoints from local cluster if available
+	// - remote
+	//	preferred endpoints from remote cluster if available
+	// - none (default)
+	//	no preference. Default behavior if this annotation does not exist
+	ServiceAffinity      = ServicePrefix + "/affinity"
+	ServiceAffinityAlias = Prefix + "/service-affinity"
+
+	// ProxyVisibility / ProxyVisibilityAlias is the annotation name used to
+	// indicate whether proxy visibility should be enabled for a given pod (i.e.,
+	// all traffic for the pod is redirected to the proxy for the given port /
+	// protocol in the annotation).
+	ProxyVisibility      = PolicyPrefix + "/proxy-visibility"
+	ProxyVisibilityAlias = Prefix + ".proxy-visibility"
+
+	// NoTrack / NoTrackAlias is the annotation name used to store the port and
+	// protocol for which we should bypass kernel conntrack for a given pod.
+	// This applies to both TCP and UDP connections. The current use case is
+	// NodeLocalDNS.
+	NoTrack      = PolicyPrefix + "/no-track-port"
+	NoTrackAlias = Prefix + ".no-track-port"
+
+	// WireguardPubKey / WireguardPubKeyAlias is the annotation name used to store
+	// the WireGuard public key in the CiliumNode CRD that we need to use to encrypt
+	// traffic to that node.
+	WireguardPubKey      = NetworkPrefix + "/wg-pub-key"
+	WireguardPubKeyAlias = Prefix + ".network.wg-pub-key"
+
+	// BGPVRouterAnnoPrefix is the prefix used for all Virtual Router annotations.
+	// It is just a prefix, because the ASN of the router is part of the annotation itself.
+	BGPVRouterAnnoPrefix = "cilium.io/bgp-virtual-router."
+
+	// IPAMPoolKey is the annotation name used to store the IPAM pool name from
+	// which workloads should allocate their IPs
+	IPAMPoolKey = IPAMPrefix + "/ip-pool"
+
+	// IPAMIPv4PoolKey is the annotation name used to store the IPAM IPv4 pool name
+	// from which workloads should allocate their IPs
+	IPAMIPv4PoolKey = IPAMPrefix + "/ipv4-pool"
+
+	// IPAMIPv6PoolKey is the annotation name used to store the IPAM IPv6 pool name
+	// from which workloads should allocate their IPs
+	IPAMIPv6PoolKey = IPAMPrefix + "/ipv6-pool"
+)
+
+var (
+	// CiliumPrefixRegex is a regex matching Cilium specific annotations.
+	CiliumPrefixRegex = regexp.MustCompile(`^([A-Za-z0-9]+\.)*cilium.io/`)
+)
+
+// Get returns the annotation value associated with the given key, or any of
+// the additional aliases if not found.
+func Get(obj metav1.Object, key string, aliases ...string) (value string, ok bool) {
+	keys := append([]string{key}, aliases...)
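+	// Lookup order is the primary key first, then each alias in the order
+	// given, e.g. (hypothetical call):
+	//
+	//	v, ok := Get(node, V4CIDRName, V4CIDRNameAlias)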
+ for _, k := range keys { + if value, ok = obj.GetAnnotations()[k]; ok { + return value, ok + } + } + + return "", false +} diff --git a/vendor/github.com/cilium/cilium/pkg/backoff/backoff.go b/vendor/github.com/cilium/cilium/pkg/backoff/backoff.go new file mode 100644 index 0000000000..2cfbde3dca --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/backoff/backoff.go @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package backoff + +import ( + "context" + "fmt" + "math" + + "github.com/google/uuid" + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/rand" + "github.com/cilium/cilium/pkg/time" +) + +var ( + log = logging.DefaultLogger.WithField(logfields.LogSubsys, "backoff") + + randGen = rand.NewSafeRand(time.Now().UnixNano()) +) + +// NodeManager is the interface required to implement cluster size dependent +// intervals +type NodeManager interface { + ClusterSizeDependantInterval(baseInterval time.Duration) time.Duration +} + +// nodeManager is a wrapper to enable using a plain function as NodeManager to implement +// cluster size dependent intervals +type nodeManager struct { + clusterSizeDependantInterval func(baseInterval time.Duration) time.Duration +} + +// NewNodeManager returns a new NodeManager implementing cluster size dependent intervals +// based on the given function. If the function is nil, then no tuning is performed. +func NewNodeManager(clusterSizeDependantInterval func(baseInterval time.Duration) time.Duration) NodeManager { + return &nodeManager{clusterSizeDependantInterval: clusterSizeDependantInterval} +} + +func (n *nodeManager) ClusterSizeDependantInterval(baseInterval time.Duration) time.Duration { + if n.clusterSizeDependantInterval == nil { + return baseInterval + } + + return n.clusterSizeDependantInterval(baseInterval) +} + +// Exponential implements an exponential backoff +type Exponential struct { + // Min is the minimal backoff time, if unspecified, 1 second will be + // used + Min time.Duration + + // Max is the maximum backoff time, if unspecified, no maximum time is + // applied + Max time.Duration + + // Factor is the factor the backoff time grows exponentially, if + // unspecified, a factor of 2.0 will be used + Factor float64 + + // Jitter, when enabled, adds random jitter to the interval + Jitter bool + + // NodeManager enables the use of cluster size dependent backoff + // intervals, i.e. the larger the cluster, the longer the backoff + // interval + NodeManager NodeManager + + // Name is a free form string describing the operation subject to the + // backoff, if unspecified, a UUID is generated. This string is used + // for logging purposes. + Name string + + // ResetAfter will reset the exponential back-off if no attempt is made for the amount of time specified here. + // Needs to be larger than the Max duration, otherwise it will be ignored to avoid accidental resets. + // If unspecified, no reset is performed. + ResetAfter time.Duration + + lastBackoffStart time.Time + + attempt int +} + +// CalculateDuration calculates the backoff duration based on minimum base +// interval, exponential factor, jitter and number of failures. 
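+//
+// For example (hypothetical values), Min=1s, Factor=2.0 and no jitter yields
+// 1s, 2s, 4s, ... capped at Max:
+//
+//	d := CalculateDuration(time.Second, time.Minute, 2.0, false, 3) // 8s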
+func CalculateDuration(min, max time.Duration, factor float64, jitter bool, failures int) time.Duration { + minFloat := float64(min) + maxFloat := float64(max) + + t := minFloat * math.Pow(factor, float64(failures)) + if max != time.Duration(0) && t > maxFloat { + t = maxFloat + } + + if jitter { + t = randGen.Float64()*(t-minFloat) + minFloat + } + + return time.Duration(t) +} + +// ClusterSizeDependantInterval returns a time.Duration that is dependent on +// the cluster size, i.e. the number of nodes that have been discovered. This +// can be used to control sync intervals of shared or centralized resources to +// avoid overloading these resources as the cluster grows. +// +// Example sync interval with baseInterval = 1 * time.Minute +// +// nodes | sync interval +// ------+----------------- +// 1 | 41.588830833s +// 2 | 1m05.916737320s +// 4 | 1m36.566274746s +// 8 | 2m11.833474640s +// 16 | 2m49.992800643s +// 32 | 3m29.790453687s +// 64 | 4m10.463236193s +// 128 | 4m51.588744261s +// 256 | 5m32.944565093s +// 512 | 6m14.416550710s +// 1024 | 6m55.946873494s +// 2048 | 7m37.506428894s +// 4096 | 8m19.080616652s +// 8192 | 9m00.662124608s +// 16384 | 9m42.247293667s +func ClusterSizeDependantInterval(baseInterval time.Duration, numNodes int) time.Duration { + // no nodes are being managed, no work will be performed, return + // baseInterval to check again in a reasonable timeframe + if numNodes == 0 { + return baseInterval + } + + waitNanoseconds := float64(baseInterval.Nanoseconds()) * math.Log1p(float64(numNodes)) + return time.Duration(int64(waitNanoseconds)) +} + +// Reset backoff attempt counter +func (b *Exponential) Reset() { + b.attempt = 0 +} + +// Wait waits for the required time using an exponential backoff +func (b *Exponential) Wait(ctx context.Context) error { + if resetDuration := b.ResetAfter; resetDuration != time.Duration(0) && resetDuration > b.Max { + if !b.lastBackoffStart.IsZero() { + if time.Since(b.lastBackoffStart) > resetDuration { + b.Reset() + } + } + } + + b.lastBackoffStart = time.Now() + b.attempt++ + t := b.Duration(b.attempt) + + log.WithFields(logrus.Fields{ + "time": t, + "attempt": b.attempt, + "name": b.Name, + }).Debug("Sleeping with exponential backoff") + + select { + case <-ctx.Done(): + return fmt.Errorf("exponential backoff cancelled via context: %s", ctx.Err()) + case <-time.After(t): + } + + return nil +} + +// Duration returns the wait duration for the nth attempt +func (b *Exponential) Duration(attempt int) time.Duration { + if b.Name == "" { + b.Name = uuid.New().String() + } + + min := time.Duration(1) * time.Second + if b.Min != time.Duration(0) { + min = b.Min + } + + factor := float64(2) + if b.Factor != float64(0) { + factor = b.Factor + } + + t := CalculateDuration(min, b.Max, factor, b.Jitter, attempt) + + if b.NodeManager != nil { + t = b.NodeManager.ClusterSizeDependantInterval(t) + } + + if b.Max != time.Duration(0) && t > b.Max { + t = b.Max + } + + return t +} diff --git a/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/annotations.go b/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/annotations.go new file mode 100644 index 0000000000..a526866ce3 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/annotations.go @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package agent + +import ( + "errors" + "fmt" + "math" + "net/netip" + "strconv" + "strings" + + "github.com/cilium/cilium/pkg/annotation" +) + +// ErrNotVRouterAnno is an error 
returned from parseAnnotation() when the
+// given string is not a `cilium.io/bgp-virtual-router` annotation.
+type ErrNotVRouterAnno struct {
+	a string
+}
+
+func (e ErrNotVRouterAnno) Error() string {
+	return "annotation " + e.a + " is not a valid cilium.io/bgp-virtual-router annotation"
+}
+
+// ErrNoASNAnno is an error returned from parseAnnotation() when the bgp-virtual-router
+// annotation does not include a local ASN.
+type ErrNoASNAnno struct {
+	a string
+}
+
+func (e ErrNoASNAnno) Error() string {
+	return "annotation " + e.a + " provides no asn"
+}
+
+// ErrASNAnno is an error returned from parseAnnotation() when the bgp-virtual-router
+// annotation includes an ASN that cannot be parsed into an integer.
+type ErrASNAnno struct {
+	err  string
+	asn  string
+	anno string
+}
+
+func (e ErrASNAnno) Error() string {
+	return "ASN " + e.asn + " in annotation " + e.anno + " cannot be parsed into integer: " + e.err
+}
+
+// ErrAttrib is an error returned from parseAnnotation() when an attribute is
+// provided but its value is malformed.
+type ErrAttrib struct {
+	anno string
+	attr string
+	err  string
+}
+
+func (e ErrAttrib) Error() string {
+	return "annotation " + e.anno + " failed to parse attribute " + e.attr + ": " + e.err
+}
+
+// The BGP control plane may need some node-specific configuration for
+// instantiating virtual routers.
+//
+// For example, BGP router IDs cannot repeat in a BGP peering topology.
+// When Cilium cannot generate a unique router ID it will look for a unique
+// router ID for the virtual router identified by its local ASN.
+//
+// We define a set of attributes which can be defined via node-specific
+// Kubernetes annotations.
+//
+// This Kubernetes annotation's syntax is:
+// `cilium.io/bgp-virtual-router.{asn}="attr1=value1,attr2=value2"`
+//
+// Where {asn} is replaced by the local ASN of the virtual router.
+//
+// Currently supported attributes are:
+//
+//	router-id=IPv4 (string): when present on a specific node, use this value for
+//	                         the router ID of the virtual router with local {asn}
+//	local-port=port (int): the local port to listen on for incoming BGP connections
+type Attributes struct {
+	// The local ASN of the virtual router these Attributes target.
+	ASN int64
+	// The router ID to use for the virtual router with the above local ASN.
+	RouterID string
+	// The local BGP port to listen on.
+	LocalPort int32
+}
+
+// AnnotationMap correlates parsed Attributes structures with the local
+// ASN they annotate.
+type AnnotationMap map[int64]Attributes
+
+// ErrMulti holds multiple errors and formats them sanely when printed.
+type ErrMulti struct {
+	errs []error
+}
+
+func (e ErrMulti) Error() string {
+	s := strings.Builder{}
+	for _, err := range e.errs {
+		s.WriteString(err.Error() + ",")
+	}
+	return s.String()
+}
+
+func (a AnnotationMap) ResolveRouterID(localASN int64) (string, error) {
+	if _, ok := a[localASN]; ok {
+		var err error
+		var parsed netip.Addr
+		if parsed, err = netip.ParseAddr(a[localASN].RouterID); err == nil && !parsed.IsUnspecified() {
+			return parsed.String(), nil
+		}
+		return "", fmt.Errorf("failed to parse RouterID for local ASN %v: %w", localASN, err)
+	}
+	return "", fmt.Errorf("router id not specified by annotation, cannot resolve router id for local ASN %v", localASN)
+}
+
+// NewAnnotationMap parses a Node's annotations into an AnnotationMap
+// and returns the latter.
+//
+// An error is returned containing one or more parsing errors.
+//
+// This is for convenience so the caller can log all parsing errors at once.
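+//
+// For illustration (hypothetical values), a node annotation such as
+//
+//	cilium.io/bgp-virtual-router.64512: "router-id=10.0.0.2,local-port=179"
+//
+// parses into an entry keyed by ASN 64512 with
+// Attributes{ASN: 64512, RouterID: "10.0.0.2", LocalPort: 179}.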
+// The error should still be treated as a normal discrete error; annotations
+// that failed to parse are omitted from the returned AnnotationMap.
+func NewAnnotationMap(a map[string]string) (AnnotationMap, error) {
+	am := AnnotationMap{}
+	errs := make([]error, 0, len(a))
+	for key, value := range a {
+		asn, attrs, err := parseAnnotation(key, value)
+		if err != nil && !errors.As(err, &ErrNotVRouterAnno{}) {
+			errs = append(errs, err)
+			continue
+		}
+		am[asn] = attrs
+	}
+	if len(errs) > 0 {
+		return am, ErrMulti{errs}
+	}
+	return am, nil
+}
+
+// parseAnnotation will attempt to parse a `cilium.io/bgp-virtual-router`
+// annotation into an Attributes structure.
+//
+// Errors returned by this parse method are defined at top of file.
+func parseAnnotation(key string, value string) (int64, Attributes, error) {
+	var out Attributes
+	// is this an annotation we care about?
+	if !strings.HasPrefix(key, annotation.BGPVRouterAnnoPrefix) {
+		return 0, out, ErrNotVRouterAnno{key}
+	}
+
+	// parse out asn from annotation key; when split at "." it will be the 3rd element
+	var asn int64
+	if anno := strings.Split(key, "."); len(anno) != 3 {
+		return 0, out, ErrNoASNAnno{key}
+	} else {
+		var err error
+		asn, err = strconv.ParseInt(anno[2], 10, 64)
+		if err != nil {
+			return 0, out, ErrASNAnno{}
+		}
+	}
+	out.ASN = asn
+
+	// split annotation value into multiple "key=value" formatted attributes.
+	attrs := strings.Split(value, ",")
+	if len(attrs) == 0 {
+		return 0, out, nil // empty attributes, not an error
+	}
+	// parse string attributes into Attributes structure.
+	for _, attr := range attrs {
+		kv := strings.Split(attr, "=")
+		if len(kv) != 2 {
+			continue
+		}
+		switch kv[0] {
+		case "router-id":
+			addr, err := netip.ParseAddr(kv[1])
+			if err != nil || addr.IsUnspecified() {
+				return 0, out, ErrAttrib{key, kv[0], "could not parse into an IPv4 address"}
+			}
+			out.RouterID = kv[1]
+		case "local-port":
+			port, err := strconv.ParseInt(kv[1], 10, 0)
+			if err != nil {
+				return 0, out, ErrAttrib{key, kv[0], "could not parse into a port number"}
+			}
+			if port > math.MaxUint16 {
+				return 0, out, ErrAttrib{key, kv[0], "local port must be smaller than 65535"}
+			}
+			out.LocalPort = int32(port)
+		}
+	}
+	return asn, out, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/controller.go b/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/controller.go
new file mode 100644
index 0000000000..9bd2c34c8b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/controller.go
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package agent
+
+import (
+	"context"
+	"fmt"
+	"runtime/pprof"
+
+	"github.com/sirupsen/logrus"
+	"k8s.io/client-go/util/workqueue"
+
+	daemon_k8s "github.com/cilium/cilium/daemon/k8s"
+	"github.com/cilium/cilium/pkg/bgpv1/agent/signaler"
+	"github.com/cilium/cilium/pkg/hive"
+	"github.com/cilium/cilium/pkg/hive/cell"
+	"github.com/cilium/cilium/pkg/hive/job"
+	v2_api "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+	v2alpha1api "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+	"github.com/cilium/cilium/pkg/k8s/resource"
+	slimlabels "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels"
+	slimmetav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+	"github.com/cilium/cilium/pkg/logging"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	"github.com/cilium/cilium/pkg/option"
+)
+
+var (
+	log =
logging.DefaultLogger.WithField(logfields.LogSubsys, "bgp-control-plane")
+)
+
+var (
+	// ErrMultiplePolicies is a static error typed when the controller encounters
+	// multiple policies which apply to its host.
+	ErrMultiplePolicies = fmt.Errorf("more than one CiliumBGPPeeringPolicy applies to this node, please ensure only a single Policy matches this node's labels")
+
+	// ErrBGPControlPlaneDisabled is set when the BGP control plane is disabled
+	ErrBGPControlPlaneDisabled = fmt.Errorf("BGP control plane is disabled")
+)
+
+type policyLister interface {
+	List() ([]*v2alpha1api.CiliumBGPPeeringPolicy, error)
+}
+
+type policyListerFunc func() ([]*v2alpha1api.CiliumBGPPeeringPolicy, error)
+
+func (plf policyListerFunc) List() ([]*v2alpha1api.CiliumBGPPeeringPolicy, error) {
+	return plf()
+}
+
+// Controller is the agent side BGP Control Plane controller.
+//
+// Controller listens for events and drives BGP related sub-systems
+// to maintain a desired state.
+type Controller struct {
+	// CiliumNodeResource provides a stream of events for changes to the local CiliumNode resource.
+	CiliumNodeResource daemon_k8s.LocalCiliumNodeResource
+	// LocalCiliumNode is the CiliumNode object for the local node.
+	LocalCiliumNode *v2_api.CiliumNode
+	// PolicyResource provides a store of cached policies and allows us to observe changes to the objects in its
+	// store.
+	PolicyResource resource.Resource[*v2alpha1api.CiliumBGPPeeringPolicy]
+	// PolicyLister is an interface which allows for the listing of all known policies
+	PolicyLister policyLister
+
+	// Sig informs the Controller that a Kubernetes
+	// event of interest has occurred.
+	//
+	// The signal itself provides no other information;
+	// when it occurs the Controller will query each
+	// informer for the latest API information required
+	// to drive its control loop.
+	Sig *signaler.BGPCPSignaler
+	// BGPMgr is an implementation of the BGPRouterManager interface
+	// and provides a declarative API for configuring BGP peers.
+	BGPMgr BGPRouterManager
+}
+
+// ControllerParams contains all parameters needed to construct a Controller
+type ControllerParams struct {
+	cell.In
+
+	Lifecycle               cell.Lifecycle
+	Scope                   cell.Scope
+	JobRegistry             job.Registry
+	Shutdowner              hive.Shutdowner
+	Sig                     *signaler.BGPCPSignaler
+	RouteMgr                BGPRouterManager
+	PolicyResource          resource.Resource[*v2alpha1api.CiliumBGPPeeringPolicy]
+	DaemonConfig            *option.DaemonConfig
+	LocalCiliumNodeResource daemon_k8s.LocalCiliumNodeResource
+}
+
+// NewController constructs a new BGP Control Plane Controller.
+//
+// When the constructor returns, the Controller will be actively watching for
+// events and configuring BGP related sub-systems.
+//
+// The constructor requires an implementation of BGPRouterManager to be provided.
+// This implementation defines which BGP backend will be used (GoBGP, FRR, Bird, etc...)
+// NOTE: only GoBGP is currently implemented.
+func NewController(params ControllerParams) (*Controller, error) {
+	// If the BGP control plane is disabled, just return nil. This way the hive dependency graph is always static
+	// regardless of config. The lifecycle has not been appended so no work will be done.
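+	// Because of this, the *Controller handed to dependents may be nil;
+	// consumers are expected to check for that, as the API handlers later in
+	// this patch do:
+	//
+	//	if h.controller == nil {
+	//		return api.Error(http.StatusNotImplemented, agent.ErrBGPControlPlaneDisabled)
+	//	}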
+ if !params.DaemonConfig.BGPControlPlaneEnabled() { + return nil, nil + } + + c := &Controller{ + Sig: params.Sig, + BGPMgr: params.RouteMgr, + PolicyResource: params.PolicyResource, + CiliumNodeResource: params.LocalCiliumNodeResource, + } + + jobGroup := params.JobRegistry.NewGroup( + params.Scope, + job.WithLogger(log), + job.WithPprofLabels(pprof.Labels("cell", "bgp-cp")), + ) + + jobGroup.Add( + job.OneShot("bgp-policy-observer", func(ctx context.Context, health cell.HealthReporter) (err error) { + for ev := range c.PolicyResource.Events(ctx) { + switch ev.Kind { + case resource.Upsert, resource.Delete: + // Signal the reconciliation logic. + c.Sig.Event(struct{}{}) + } + ev.Done(nil) + } + return nil + }), + + job.OneShot("cilium-node-observer", func(ctx context.Context, health cell.HealthReporter) (err error) { + for ev := range c.CiliumNodeResource.Events(ctx) { + switch ev.Kind { + case resource.Upsert: + // Set the local CiliumNode. + c.LocalCiliumNode = ev.Object + // Signal the reconciliation logic. + c.Sig.Event(struct{}{}) + } + ev.Done(nil) + } + return nil + }), + + job.OneShot("bgp-controller", func(ctx context.Context, health cell.HealthReporter) (err error) { + // initialize PolicyLister used in the controller + policyStore, err := c.PolicyResource.Store(ctx) + if err != nil { + return fmt.Errorf("error creating CiliumBGPPeeringPolicy resource store: %w", err) + } + c.PolicyLister = policyListerFunc(func() ([]*v2alpha1api.CiliumBGPPeeringPolicy, error) { + return policyStore.List(), nil + }) + // run the controller + c.Run(ctx) + return nil + }, job.WithRetry(3, workqueue.DefaultControllerRateLimiter()), job.WithShutdown()), + ) + + params.Lifecycle.Append(jobGroup) + return c, nil +} + +// Run places the Controller into its control loop. +// +// When new events trigger a signal the control loop will be evaluated. +// +// A cancel of the provided ctx will kill the control loop along with the running +// informers. +func (c *Controller) Run(ctx context.Context) { + var ( + l = log.WithFields(logrus.Fields{ + "component": "Controller.Run", + }) + ) + + // add an initial signal to kick things off + c.Sig.Event(struct{}{}) + + l.Info("Cilium BGP Control Plane Controller now running...") + for { + select { + case <-ctx.Done(): + l.Info("Cilium BGP Control Plane Controller shut down") + return + case <-c.Sig.Sig: + l.Info("Cilium BGP Control Plane Controller woken for reconciliation") + if err := c.Reconcile(ctx); err != nil { + l.WithError(err).Error("Encountered error during reconciliation") + } else { + l.Debug("Successfully completed reconciliation") + } + } + } +} + +// PolicySelection returns a CiliumBGPPeeringPolicy which applies to the provided +// *corev1.Node, enforced by a set of policy selection rules. +// +// Policy selection follows the following rules: +// - A policy matches a node if said policy's "nodeSelector" field matches +// the node's labels. If "nodeSelector" is omitted, it is unconditionally +// selected. +// - If (N > 1) policies match the provided *corev1.Node an error is returned. +// only a single policy may apply to a node to avoid ambiguity at this stage +// of development. +func PolicySelection(ctx context.Context, labels map[string]string, policies []*v2alpha1api.CiliumBGPPeeringPolicy) (*v2alpha1api.CiliumBGPPeeringPolicy, error) { + var ( + l = log.WithFields(logrus.Fields{ + "component": "PolicySelection", + }) + + // determine which policies match our node's labels. 
+		selectedPolicy *v2alpha1api.CiliumBGPPeeringPolicy
+		slimLabels     = slimlabels.Set(labels)
+	)
+
+	// range over policies and see if any match this node's labels.
+	//
+	// for now, only a single BGP policy can be applied to a node. if more than
+	// one policy applies to a node, we disconnect from all BGP peers and log
+	// an error.
+	for _, policy := range policies {
+		var selected bool
+
+		l.WithFields(logrus.Fields{
+			"policyName":         policy.Name,
+			"nodeLabels":         slimLabels,
+			"policyNodeSelector": policy.Spec.NodeSelector.String(),
+		}).Debug("Comparing BGP policy node selector with node's labels")
+
+		if policy.Spec.NodeSelector == nil {
+			selected = true
+		} else {
+			nodeSelector, err := slimmetav1.LabelSelectorAsSelector(policy.Spec.NodeSelector)
+			if err != nil {
+				l.WithError(err).Error("Failed to convert CiliumBGPPeeringPolicy's NodeSelector to a label.Selector interface")
+				continue
+			}
+			if nodeSelector.Matches(slimLabels) {
+				selected = true
+			}
+		}
+
+		if selected {
+			if selectedPolicy != nil {
+				return nil, ErrMultiplePolicies
+			}
+			selectedPolicy = policy
+		}
+	}
+
+	return selectedPolicy, nil
+}
+
+// Reconcile is the control loop for the Controller.
+//
+// Reconcile will be invoked when one or more event sources trigger a signal
+// via the Controller's Signaler structure.
+//
+// On signal, Reconcile will obtain the state of the world necessary to drive
+// the BGP control plane toward any new BGP peering policies.
+//
+// Reconcile will only allow a single CiliumBGPPeeringPolicy to apply to the
+// node it's running on.
+func (c *Controller) Reconcile(ctx context.Context) error {
+	var (
+		l = log.WithFields(logrus.Fields{
+			"component": "Controller.Reconcile",
+		})
+	)
+
+	if c.LocalCiliumNode == nil {
+		return fmt.Errorf("attempted reconciliation with nil local CiliumNode")
+	}
+	if c.PolicyLister == nil {
+		return fmt.Errorf("attempted reconciliation with nil PolicyLister")
+	}
+
+	// retrieve all CiliumBGPPeeringPolicies
+	policies, err := c.PolicyLister.List()
+	if err != nil {
+		return fmt.Errorf("failed to list CiliumBGPPeeringPolicies")
+	}
+	l.WithField("count", len(policies)).Debug("Successfully listed CiliumBGPPeeringPolicies")
+
+	// perform policy selection based on node.
+	labels := c.LocalCiliumNode.Labels
+	policy, err := PolicySelection(ctx, labels, policies)
+	if err != nil {
+		l.WithError(err).Error("Policy selection failed")
+		c.FullWithdrawal(ctx)
+		return err
+	}
+	if policy == nil {
+		// no policy was discovered; tell the router manager to withdraw peers
+		// if any are configured.
+		l.Debug("No BGP peering policy applies to this node, any existing BGP sessions will be removed.")
+		c.FullWithdrawal(ctx)
+		return nil
+	}
+
+	// apply policy defaults to have consistent default config across sub-systems
+	policy = policy.DeepCopy() // deepcopy to not modify the policy object in store
+	policy.SetDefaults()
+
+	err = c.validatePolicy(policy)
+	if err != nil {
+		return fmt.Errorf("invalid BGP peering policy %s: %w", policy.Name, err)
+	}
+
+	// call bgp sub-systems required to apply this policy's BGP topology.
+	l.Debug("Asking configured BGPRouterManager to configure peering")
+	if err := c.BGPMgr.ConfigurePeers(ctx, policy, c.LocalCiliumNode); err != nil {
+		return fmt.Errorf("failed to configure BGP peers, cannot apply BGP peering policy: %w", err)
+	}
+
+	return nil
+}
+
+// FullWithdrawal will instruct the configured BGPRouterManager to withdraw all
+// BGP servers and peers.
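+//
+// This is equivalent to applying a nil policy, which the BGPRouterManager
+// interface documents as withdrawing all routes and disconnecting from all
+// peers:
+//
+//	_ = c.BGPMgr.ConfigurePeers(ctx, nil, nil)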
+func (c *Controller) FullWithdrawal(ctx context.Context) { + _ = c.BGPMgr.ConfigurePeers(ctx, nil, nil) // cannot fail, no need for error handling +} + +// validatePolicy validates the CiliumBGPPeeringPolicy. +// The validation is normally done by kube-apiserver (based on CRD validation markers), +// this validates only those constraints that cannot be enforced by them. +func (c *Controller) validatePolicy(policy *v2alpha1api.CiliumBGPPeeringPolicy) error { + for _, r := range policy.Spec.VirtualRouters { + for _, n := range r.Neighbors { + if err := n.Validate(); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/mock.go b/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/mock.go new file mode 100644 index 0000000000..b10bdcd3c4 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/mock.go @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package agent + +import ( + "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" +) + +var _ policyLister = (*MockCiliumBGPPeeringPolicyLister)(nil) + +type MockCiliumBGPPeeringPolicyLister struct { + List_ func() ([]*v2alpha1.CiliumBGPPeeringPolicy, error) +} + +func (m *MockCiliumBGPPeeringPolicyLister) List() ([]*v2alpha1.CiliumBGPPeeringPolicy, error) { + return m.List_() +} diff --git a/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/routermgr.go b/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/routermgr.go new file mode 100644 index 0000000000..94a0ec4144 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/routermgr.go @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package agent + +import ( + "context" + + "github.com/cilium/cilium/api/v1/models" + restapi "github.com/cilium/cilium/api/v1/server/restapi/bgp" + v2api "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + v2alpha1api "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" +) + +// BGPRouterManager provides a declarative API for defining +// BGP peers. +type BGPRouterManager interface { + // ConfigurePeers evaluates the provided CiliumBGPPeeringPolicy + // and the implementation will configure itself to apply this policy. + // + // A ControllerState structure is provided which captures Cilium's runtime + // state at the time of this method's invocation. It must remain read-only. + // + // ConfigurePeers should block until it can ensure a subsequent call + // to ConfigurePeers can occur without conflict. + // + // ConfigurePeers should not be called concurrently and expects invocations + // to be serialized contingent to the method's completion. + // + // An error is returned only when the implementation can determine a + // critical flaw with the peering policy, not when network connectivity + // is an issue. + // + // Providing a nil policy to ConfigurePeers will withdrawal all routes + // and disconnect from the peers. + ConfigurePeers(ctx context.Context, policy *v2alpha1api.CiliumBGPPeeringPolicy, ciliumNode *v2api.CiliumNode) error + + // GetPeers fetches BGP peering state from underlying routing daemon. + // + // List of all peers will be returned and if there are multiple instances of + // BGP daemon running locally, then peers can be differentiated based on + // local AS number. 
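+	//
+	// Hypothetical consumer sketch (field names assumed from the v1 models
+	// package):
+	//
+	//	peers, err := mgr.GetPeers(ctx)
+	//	if err == nil {
+	//		for _, p := range peers {
+	//			fmt.Println(p.LocalAsn, p.PeerAddress, p.SessionState)
+	//		}
+	//	}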
+ GetPeers(ctx context.Context) ([]*models.BgpPeer, error) + + // GetRoutes fetches BGP routes from underlying routing daemon's RIBs. + GetRoutes(ctx context.Context, params restapi.GetBgpRoutesParams) ([]*models.BgpRoute, error) + + // GetRoutePolicies fetches BGP routing policies from underlying routing daemon. + GetRoutePolicies(ctx context.Context, params restapi.GetBgpRoutePoliciesParams) ([]*models.BgpRoutePolicy, error) + + // Stop will stop all BGP instances and clean up local state. + Stop() +} diff --git a/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/signaler/signaler.go b/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/signaler/signaler.go new file mode 100644 index 0000000000..5d103ae61e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/bgpv1/agent/signaler/signaler.go @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium +package signaler + +// BGPCPSignaler multiplexes multiple event sources into a single level-triggered +// event instructing the BGP Control Plane Controller to perform reconciliation. +// +// BGPCPSignaler should always be constructed with a channel of size 1. +// +// Use of a BGPCPSignaler allows for bursts of events to be "rolled-up". +// This is a suitable approach since the Controller checks the entire state of +// the world on each iteration of its control loop. +// +// Additionally, this precludes any need for ordering between different event +// sources. +type BGPCPSignaler struct { + Sig chan struct{} +} + +// NewSignaler constructs a Signaler +func NewBGPCPSignaler() *BGPCPSignaler { + return &BGPCPSignaler{ + Sig: make(chan struct{}, 1), + } +} + +// Event adds an edge triggered event to the Signaler. +// +// A controller which uses this Signaler will be notified of this event some +// time after. +// +// This signature adheres to the common event handling signatures of +// cache.ResourceEventHandlerFuncs for convenience. 
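+//
+// For example, a signaler could be wired directly to an informer
+// (hypothetical wiring; UpdateFunc takes two arguments and would need a small
+// adapter):
+//
+//	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+//		AddFunc:    sig.Event,
+//		DeleteFunc: sig.Event,
+//	})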
+func (s BGPCPSignaler) Event(_ interface{}) { + select { + case s.Sig <- struct{}{}: + default: + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/bgpv1/api/get_peer.go b/vendor/github.com/cilium/cilium/pkg/bgpv1/api/get_peer.go new file mode 100644 index 0000000000..68f3251120 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/bgpv1/api/get_peer.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package api + +import ( + "fmt" + "net/http" + + "github.com/go-openapi/runtime/middleware" + + restapi "github.com/cilium/cilium/api/v1/server/restapi/bgp" + "github.com/cilium/cilium/pkg/api" + "github.com/cilium/cilium/pkg/bgpv1/agent" +) + +type BGPHandlerInParams struct { + Controller *agent.Controller +} + +func NewGetPeerHandler(c *agent.Controller) restapi.GetBgpPeersHandler { + return &getPeerHandler{ + controller: c, + } +} + +type getPeerHandler struct { + controller *agent.Controller +} + +func (h *getPeerHandler) Handle(params restapi.GetBgpPeersParams) middleware.Responder { + if h.controller == nil { + return api.Error(http.StatusNotImplemented, agent.ErrBGPControlPlaneDisabled) + } + peers, err := h.controller.BGPMgr.GetPeers(params.HTTPRequest.Context()) + if err != nil { + return api.Error(http.StatusInternalServerError, fmt.Errorf("failed to get peers: %w", err)) + } + return restapi.NewGetBgpPeersOK().WithPayload(peers) +} diff --git a/vendor/github.com/cilium/cilium/pkg/bgpv1/api/get_route_policies.go b/vendor/github.com/cilium/cilium/pkg/bgpv1/api/get_route_policies.go new file mode 100644 index 0000000000..186d4497d4 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/bgpv1/api/get_route_policies.go @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package api + +import ( + "fmt" + "net/http" + + "github.com/go-openapi/runtime/middleware" + + restapi "github.com/cilium/cilium/api/v1/server/restapi/bgp" + "github.com/cilium/cilium/pkg/api" + "github.com/cilium/cilium/pkg/bgpv1/agent" +) + +func NewGetRoutePoliciesHandler(c *agent.Controller) restapi.GetBgpRoutePoliciesHandler { + return &getRoutePoliciesHandler{ + controller: c, + } +} + +type getRoutePoliciesHandler struct { + controller *agent.Controller +} + +func (h *getRoutePoliciesHandler) Handle(params restapi.GetBgpRoutePoliciesParams) middleware.Responder { + if h.controller == nil { + return api.Error(http.StatusNotImplemented, agent.ErrBGPControlPlaneDisabled) + } + + policies, err := h.controller.BGPMgr.GetRoutePolicies(params.HTTPRequest.Context(), params) + if err != nil { + return api.Error(http.StatusInternalServerError, fmt.Errorf("failed to get route policies: %w", err)) + } + return restapi.NewGetBgpRoutePoliciesOK().WithPayload(policies) +} diff --git a/vendor/github.com/cilium/cilium/pkg/bgpv1/api/get_routes.go b/vendor/github.com/cilium/cilium/pkg/bgpv1/api/get_routes.go new file mode 100644 index 0000000000..36809b7e63 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/bgpv1/api/get_routes.go @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package api + +import ( + "fmt" + "net/http" + + "github.com/go-openapi/runtime/middleware" + + restapi "github.com/cilium/cilium/api/v1/server/restapi/bgp" + "github.com/cilium/cilium/pkg/api" + "github.com/cilium/cilium/pkg/bgpv1/agent" +) + +func 
NewGetRoutesHandler(c *agent.Controller) restapi.GetBgpRoutesHandler {
+	return &getRoutesHandler{
+		controller: c,
+	}
+}
+
+type getRoutesHandler struct {
+	controller *agent.Controller
+}
+
+func (h *getRoutesHandler) Handle(params restapi.GetBgpRoutesParams) middleware.Responder {
+	if h.controller == nil {
+		return api.Error(http.StatusNotImplemented, agent.ErrBGPControlPlaneDisabled)
+	}
+	routes, err := h.controller.BGPMgr.GetRoutes(params.HTTPRequest.Context(), params)
+	if err != nil {
+		return api.Error(http.StatusInternalServerError, fmt.Errorf("failed to get routes: %w", err))
+	}
+	return restapi.NewGetBgpRoutesOK().WithPayload(routes)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/bpf.go b/vendor/github.com/cilium/cilium/pkg/bpf/bpf.go
new file mode 100644
index 0000000000..eec94868aa
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/bpf/bpf.go
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package bpf
+
+import (
+	"sync/atomic"
+
+	"github.com/cilium/ebpf"
+
+	"github.com/cilium/cilium/pkg/logging"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var (
+	log = logging.DefaultLogger.WithField(logfields.LogSubsys, "bpf")
+
+	preAllocateMapSetting uint32 = BPF_F_NO_PREALLOC
+)
+
+const (
+	// Flags for BPF_MAP_CREATE. Must match values from linux/bpf.h
+	BPF_F_NO_PREALLOC = 1 << 0
+)
+
+// EnableMapPreAllocation enables BPF map pre-allocation on map types that
+// support it. This does not take effect on existing maps, although some maps
+// could be recreated later when objCheck() runs.
+func EnableMapPreAllocation() {
+	atomic.StoreUint32(&preAllocateMapSetting, 0)
+}
+
+// DisableMapPreAllocation disables BPF map pre-allocation as a default
+// setting. Some map types enforce a pre-allocation strategy, in which case
+// this setting has no effect. Also note that this does not take effect on
+// existing maps, although they could be recreated later when objCheck() runs.
+func DisableMapPreAllocation() {
+	atomic.StoreUint32(&preAllocateMapSetting, BPF_F_NO_PREALLOC)
+}
+
+// GetPreAllocateMapFlags returns the map flags for maps which use conditional
+// pre-allocation.
+func GetPreAllocateMapFlags(t ebpf.MapType) uint32 {
+	switch t {
+	// LPM Tries don't support preallocation.
+	case ebpf.LPMTrie:
+		return BPF_F_NO_PREALLOC
+	// Support disabling preallocation for these map types.
+	case ebpf.Hash, ebpf.PerCPUHash, ebpf.HashOfMaps:
+		return atomic.LoadUint32(&preAllocateMapSetting)
+	}
+
+	return 0
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/bpf_linux.go b/vendor/github.com/cilium/cilium/pkg/bpf/bpf_linux.go
new file mode 100644
index 0000000000..1a8972bafc
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/bpf/bpf_linux.go
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build linux
+
+package bpf
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path"
+
+	"github.com/cilium/ebpf"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	"github.com/cilium/cilium/pkg/metrics"
+	"github.com/cilium/cilium/pkg/spanstat"
+)
+
+// createMap wraps a call to ebpf.NewMapWithOptions while measuring syscall duration.
+func createMap(spec *ebpf.MapSpec, opts *ebpf.MapOptions) (*ebpf.Map, error) {
+	if opts == nil {
+		opts = &ebpf.MapOptions{}
+	}
+
+	var duration *spanstat.SpanStat
+	if metrics.BPFSyscallDuration.IsEnabled() {
+		duration = spanstat.Start()
+	}
+
+	m, err := ebpf.NewMapWithOptions(spec, *opts)
+
+	if metrics.BPFSyscallDuration.IsEnabled() {
+		metrics.BPFSyscallDuration.WithLabelValues(metricOpCreate, metrics.Error2Outcome(err)).Observe(duration.End(err == nil).Total().Seconds())
+	}
+
+	return m, err
+}
+
+func objCheck(m *ebpf.Map, path string, mapType ebpf.MapType, keySize, valueSize, maxEntries, flags uint32) bool {
+	scopedLog := log.WithField(logfields.Path, path)
+	mismatch := false
+
+	if m.Type() != mapType {
+		scopedLog.WithFields(logrus.Fields{
+			"old": m.Type(),
+			"new": mapType,
+		}).Warning("Map type mismatch for BPF map")
+		mismatch = true
+	}
+
+	if m.KeySize() != keySize {
+		scopedLog.WithFields(logrus.Fields{
+			"old": m.KeySize(),
+			"new": keySize,
+		}).Warning("Key-size mismatch for BPF map")
+		mismatch = true
+	}
+
+	if m.ValueSize() != valueSize {
+		scopedLog.WithFields(logrus.Fields{
+			"old": m.ValueSize(),
+			"new": valueSize,
+		}).Warning("Value-size mismatch for BPF map")
+		mismatch = true
+	}
+
+	if m.MaxEntries() != maxEntries {
+		scopedLog.WithFields(logrus.Fields{
+			"old": m.MaxEntries(),
+			"new": maxEntries,
+		}).Warning("Max entries mismatch for BPF map")
+		mismatch = true
+	}
+	if m.Flags() != flags {
+		scopedLog.WithFields(logrus.Fields{
+			"old": m.Flags(),
+			"new": flags,
+		}).Warning("Flags mismatch for BPF map")
+		mismatch = true
+	}
+
+	if mismatch {
+		if m.Type() == ebpf.ProgramArray {
+			return false
+		}
+
+		scopedLog.Warning("Removing map to allow for property upgrade (expect map data loss)")
+
+		// Kernel still holds map reference count via attached prog.
+		// Only exception is prog array, but that is already resolved
+		// differently.
+		os.Remove(path)
+		return true
+	}
+
+	return false
+}
+
+// OpenOrCreateMap attempts to load the pinned map at "pinDir/<spec.Name>" if
+// the spec is marked as Pinned. Any parent directories of pinDir are
+// automatically created. Any pinned maps incompatible with the given spec are
+// removed and recreated.
+//
+// If spec.Pinning is 0, a new Map is always created.
+func OpenOrCreateMap(spec *ebpf.MapSpec, pinDir string) (*ebpf.Map, error) {
+	var opts ebpf.MapOptions
+	if spec.Pinning != 0 {
+		if pinDir == "" {
+			return nil, errors.New("cannot pin map to empty pinDir")
+		}
+		if spec.Name == "" {
+			return nil, errors.New("cannot load unnamed map from pin")
+		}
+
+		if err := MkdirBPF(pinDir); err != nil {
+			return nil, fmt.Errorf("creating map base pinning directory: %w", err)
+		}
+
+		opts.PinPath = pinDir
+	}
+
+	m, err := createMap(spec, &opts)
+	if errors.Is(err, ebpf.ErrMapIncompatible) {
+		// Found incompatible map. Open the pin again to find out why.
+		m, err := ebpf.LoadPinnedMap(path.Join(pinDir, spec.Name), nil)
+		if err != nil {
+			return nil, fmt.Errorf("open pin of incompatible map: %w", err)
+		}
+		defer m.Close()
+
+		log.WithField(logfields.Path, path.Join(pinDir, spec.Name)).
+			WithFields(logrus.Fields{
+				"old": fmt.Sprintf("Type:%s KeySize:%d ValueSize:%d MaxEntries:%d Flags:%d",
+					m.Type(), m.KeySize(), m.ValueSize(), m.MaxEntries(), m.Flags()),
+				"new": fmt.Sprintf("Type:%s KeySize:%d ValueSize:%d MaxEntries:%d Flags:%d",
+					spec.Type, spec.KeySize, spec.ValueSize, spec.MaxEntries, spec.Flags),
+			}).Info("Unpinning map with incompatible properties")
+
+		// Existing map incompatible with spec. Unpin so it can be recreated.
+		if err := m.Unpin(); err != nil {
+			return nil, err
+		}
+
+		return createMap(spec, &opts)
+	}
+
+	return m, err
+}
+
+// GetMtime returns monotonic time that can be used to compare
+// values with ktime_get_ns() BPF helper, e.g. needed to check
+// the timeout in sec for BPF entries. We return the raw nsec,
+// although that is not quite usable for comparison. Go has
+// runtime.nanotime() but doesn't expose it as an API.
+func GetMtime() (uint64, error) {
+	var ts unix.Timespec
+
+	err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts)
+	if err != nil {
+		return 0, fmt.Errorf("Unable to get time: %s", err)
+	}
+
+	return uint64(unix.TimespecToNsec(ts)), nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/bpffs_linux.go b/vendor/github.com/cilium/cilium/pkg/bpf/bpffs_linux.go
new file mode 100644
index 0000000000..da9c0b2afd
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/bpf/bpffs_linux.go
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build linux
+
+package bpf
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/cilium/cilium/pkg/components"
+	"github.com/cilium/cilium/pkg/defaults"
+	"github.com/cilium/cilium/pkg/mountinfo"
+)
+
+var (
+	// Path to where bpffs is mounted
+	bpffsRoot = defaults.BPFFSRoot
+
+	// Set to true on the first read access to detect misordering
+	lockedDown      = false
+	once            sync.Once
+	readMountInfo   sync.Once
+	mountInfoPrefix string
+)
+
+func lockDown() {
+	lockedDown = true
+}
+
+func setBPFFSRoot(path string) {
+	if lockedDown {
+		panic("setBPFFSRoot() call after bpffsRoot was read")
+	}
+	bpffsRoot = path
+}
+
+func BPFFSRoot() string {
+	once.Do(lockDown)
+	return bpffsRoot
+}
+
+// TCGlobalsPath returns the absolute path to tc/globals under the bpffs root,
+// used for legacy map pin paths.
+func TCGlobalsPath() string {
+	once.Do(lockDown)
+	return filepath.Join(bpffsRoot, defaults.TCGlobalsPath)
+}
+
+// CiliumPath returns the bpffs path to be used for Cilium object pins.
+func CiliumPath() string {
+	once.Do(lockDown)
+	return filepath.Join(bpffsRoot, "cilium")
+}
+
+// MkdirBPF wraps [os.MkdirAll] with the right permission bits for bpffs.
+// Use this for ensuring the existence of directories on bpffs.
+func MkdirBPF(path string) error {
+	return os.MkdirAll(path, 0755)
+}
+
+func tcPathFromMountInfo(name string) string {
+	readMountInfo.Do(func() {
+		mountInfos, err := mountinfo.GetMountInfo()
+		if err != nil {
+			log.WithError(err).Fatal("Could not get mount info for map root lookup")
+		}
+
+		for _, mountInfo := range mountInfos {
+			if mountInfo.FilesystemType == "bpf" {
+				mountInfoPrefix = filepath.Join(mountInfo.MountPoint, defaults.TCGlobalsPath)
+				return
+			}
+		}
+
+		log.Fatal("Could not find BPF map root")
+	})
+
+	return filepath.Join(mountInfoPrefix, name)
+}
+
+// MapPath returns a path for a BPF map with a given name.
+func MapPath(name string) string {
+	if components.IsCiliumAgent() {
+		once.Do(lockDown)
+		return filepath.Join(TCGlobalsPath(), name)
+	}
+	return tcPathFromMountInfo(name)
+}
+
+// LocalMapName returns the name for a BPF map that is local to the specified ID.
+func LocalMapName(name string, id uint16) string {
+	return fmt.Sprintf("%s%05d", name, id)
+}
+
+// LocalMapPath returns the path for a BPF map that is local to the specified ID.
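+//
+// For example (illustrative name and ID, default bpffs root assumed):
+//
+//	LocalMapPath("cilium_policy_", 42)
+//	// "/sys/fs/bpf/tc/globals/cilium_policy_00042"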
+func LocalMapPath(name string, id uint16) string {
+	return MapPath(LocalMapName(name, id))
+}
+
+var (
+	mountOnce sync.Once
+)
+
+// mountFS mounts the BPFFS filesystem into the desired mapRoot directory.
+func mountFS(printWarning bool) error {
+	if printWarning {
+		log.Warning("================================= WARNING ==========================================")
+		log.Warning("BPF filesystem is not mounted. This will lead to network disruption when Cilium pods")
+		log.Warning("are restarted. Ensure that the BPF filesystem is mounted in the host.")
+		log.Warning("https://docs.cilium.io/en/stable/operations/system_requirements/#mounted-ebpf-filesystem")
+		log.Warning("====================================================================================")
+	}
+
+	log.Infof("Mounting BPF filesystem at %s", bpffsRoot)
+
+	mapRootStat, err := os.Stat(bpffsRoot)
+	if err != nil {
+		if os.IsNotExist(err) {
+			if err := MkdirBPF(bpffsRoot); err != nil {
+				return fmt.Errorf("unable to create bpf mount directory: %s", err)
+			}
+		} else {
+			return fmt.Errorf("failed to stat the mount path %s: %s", bpffsRoot, err)
+		}
+	} else if !mapRootStat.IsDir() {
+		return fmt.Errorf("%s exists but is not a directory", bpffsRoot)
+	}
+
+	if err := unix.Mount(bpffsRoot, bpffsRoot, "bpf", 0, ""); err != nil {
+		return fmt.Errorf("failed to mount %s: %s", bpffsRoot, err)
+	}
+	return nil
+}
+
+// hasMultipleMounts checks whether the current mapRoot has more than one mount.
+func hasMultipleMounts() (bool, error) {
+	num := 0
+
+	mountInfos, err := mountinfo.GetMountInfo()
+	if err != nil {
+		return false, err
+	}
+
+	for _, mountInfo := range mountInfos {
+		if mountInfo.Root == "/" && mountInfo.MountPoint == bpffsRoot {
+			num++
+		}
+	}
+
+	return num > 1, nil
+}
+
+// checkOrMountCustomLocation tries to check or mount the BPF filesystem in the
+// given path.
+func checkOrMountCustomLocation(bpfRoot string) error {
+	setBPFFSRoot(bpfRoot)
+
+	// Check whether the custom location has a BPFFS mount.
+	mounted, bpffsInstance, err := mountinfo.IsMountFS(mountinfo.FilesystemTypeBPFFS, bpfRoot)
+	if err != nil {
+		return err
+	}
+
+	// If the custom location has no mount, let's mount BPFFS there.
+	if !mounted {
+		setBPFFSRoot(bpfRoot)
+		if err := mountFS(true); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	// If the custom location already has a mount with a filesystem other than
+	// BPFFS, return an error.
+	if !bpffsInstance {
+		return fmt.Errorf("mount in the custom directory %s has a different filesystem than BPFFS", bpfRoot)
+	}
+
+	log.Infof("Detected mounted BPF filesystem at %s", bpffsRoot)
+
+	return nil
+}
+
+// checkOrMountDefaultLocations tries to check or mount the BPF filesystem in
+// standard locations, which are:
+// - /sys/fs/bpf
+// - /run/cilium/bpffs
+// There is a procedure of determining which directory is going to be used:
+//  1. Checking whether BPFFS filesystem is mounted in /sys/fs/bpf.
+//  2. If there is no mount, then mount BPFFS in /sys/fs/bpf and finish there.
+//  3. If there is a BPFFS mount, finish there.
+//  4. If there is a mount, but with another filesystem, then it most probably
+//     means that Cilium is running inside a container which has /sys/fs/bpf
+//     mounted from the host, but the host itself doesn't have a proper BPFFS
+//     mount, so that mount is just an empty directory. In that case, mount
+//     BPFFS under /run/cilium/bpffs.
+func checkOrMountDefaultLocations() error {
+	// Check whether /sys/fs/bpf has a BPFFS mount.
+	mounted, bpffsInstance, err := mountinfo.IsMountFS(mountinfo.FilesystemTypeBPFFS, bpffsRoot)
+	if err != nil {
+		return err
+	}
+
+	// If /sys/fs/bpf is not mounted at all, we should mount
+	// BPFFS there.
+	if !mounted {
+		if err := mountFS(false); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	if !bpffsInstance {
+		// If /sys/fs/bpf has a mount but with some other filesystem
+		// than BPFFS, it means that Cilium is running inside a container
+		// and /sys/fs/bpf is not mounted on the host. We should mount BPFFS
+		// in /run/cilium/bpffs automatically. This will allow operation
+		// of Cilium but will result in unmounting of the filesystem
+		// when the pod is restarted. This in turn will cause resources
+		// such as the connection tracking table of the BPF programs to
+		// be released which will cause all connections into local
+		// containers to be dropped. The user is warned about this.
+		log.Warnf("BPF filesystem is going to be mounted automatically "+
+			"in %s. However, it probably means that Cilium is running "+
+			"inside container and BPFFS is not mounted on the host. "+
+			"For more information, see: https://cilium.link/err-bpf-mount",
+			defaults.BPFFSRootFallback,
+		)
+		setBPFFSRoot(defaults.BPFFSRootFallback)
+
+		cMounted, cBpffsInstance, err := mountinfo.IsMountFS(mountinfo.FilesystemTypeBPFFS, bpffsRoot)
+		if err != nil {
+			return err
+		}
+		if !cMounted {
+			if err := mountFS(false); err != nil {
+				return err
+			}
+		} else if !cBpffsInstance {
+			log.Fatalf("%s is mounted but has a different filesystem than BPFFS", defaults.BPFFSRootFallback)
+		}
+	}
+
+	log.Infof("Detected mounted BPF filesystem at %s", bpffsRoot)
+
+	return nil
+}
+
+func checkOrMountFS(bpfRoot string) error {
+	if bpfRoot == "" {
+		if err := checkOrMountDefaultLocations(); err != nil {
+			return err
+		}
+	} else {
+		if err := checkOrMountCustomLocation(bpfRoot); err != nil {
+			return err
+		}
+	}
+
+	multipleMounts, err := hasMultipleMounts()
+	if err != nil {
+		return err
+	}
+	if multipleMounts {
+		return fmt.Errorf("multiple mount points detected at %s", bpffsRoot)
+	}
+
+	return nil
+}
+
+// CheckOrMountFS checks or mounts the BPF filesystem at bpfRoot, or in the
+// default locations if bpfRoot is empty. If the filesystem cannot be mounted,
+// the process exits fatally. The check-or-mount is performed at most once.
+func CheckOrMountFS(bpfRoot string) {
+	mountOnce.Do(func() {
+		if err := checkOrMountFS(bpfRoot); err != nil {
+			log.WithError(err).Fatal("Unable to mount BPF filesystem")
+		}
+	})
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/bpffs_migrate.go b/vendor/github.com/cilium/cilium/pkg/bpf/bpffs_migrate.go
new file mode 100644
index 0000000000..d0a61be998
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/bpf/bpffs_migrate.go
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package bpf
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/cilium/ebpf"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+
+	"github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+const bpffsPending = ":pending"
+
+// StartBPFFSMigration starts the map migration process for a given ELF's maps.
+// When a new ELF contains a map definition that differs from its existing (pinned)
+// counterpart, re-pin it to its current path suffixed by ':pending'.
+// A map's type, key size, value size, flags and max entries are compared to the given spec.
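+//
+// For example (illustrative map name): a pinned map whose spec changed is
+// moved aside as
+//
+//	<bpffsPath>/cilium_calls_netdev -> <bpffsPath>/cilium_calls_netdev:pending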
+// +// Takes a bpffsPath explicitly since it does not necessarily execute within +// the same runtime as the agent. It is imported from a Cilium cmd that takes +// its bpffs path from an env. +func StartBPFFSMigration(bpffsPath string, coll *ebpf.CollectionSpec) error { + if coll == nil { + return errors.New("can't migrate a nil CollectionSpec") + } + + for name, spec := range coll.Maps { + // Skip map specs without the pinning flag. Also takes care of skipping .data, + // .rodata and .bss. + if spec.Pinning == 0 { + continue + } + + // Re-pin the map with ':pending' suffix if incoming spec differs from + // the currently-pinned map. + if err := RepinMap(bpffsPath, name, spec); err != nil { + return err + } + } + + return nil +} + +// FinalizeBPFFSMigration finalizes the migration of an ELF's maps. +// If revert is true, any pending maps are re-pinned back to their original +// locations. If revert is false, any pending maps are unpinned (deleted). +// +// Takes a bpffsPath explicitly since it does not necessarily execute within +// the same runtime as the agent. It is imported from a Cilium cmd that takes +// its bpffs path from an env. +func FinalizeBPFFSMigration(bpffsPath string, coll *ebpf.CollectionSpec, revert bool) error { + if coll == nil { + return errors.New("can't migrate a nil CollectionSpec") + } + + for name, spec := range coll.Maps { + // Skip map specs without the pinning flag. Also takes care of skipping .data, + // .rodata and .bss. + // Don't unpin existing maps if their new versions are missing the pinning flag. + if spec.Pinning == 0 { + continue + } + + if err := FinalizeMap(bpffsPath, name, revert); err != nil { + return err + } + } + + return nil +} + +// RepinMap opens a map from bpffs by its pin in '/tc/globals/', +// compares its properties against the incoming spec and re-pins it to +// ':pending' if any of its properties differ. +func RepinMap(bpffsPath string, name string, spec *ebpf.MapSpec) error { + file := filepath.Join(bpffsPath, name) + pinned, err := ebpf.LoadPinnedMap(file, nil) + + // Given map was not pinned, nothing to do. + if errors.Is(err, unix.ENOENT) { + return nil + } + + if err != nil { + return fmt.Errorf("map not found at path %s: %v", name, err) + } + defer pinned.Close() + + if pinned.Type() == spec.Type && + pinned.KeySize() == spec.KeySize && + pinned.ValueSize() == spec.ValueSize && + pinned.Flags() == spec.Flags && + pinned.MaxEntries() == spec.MaxEntries { + // cilium_calls_xdp is shared between XDP interfaces and should only be + // migrated if the existing map is incompatible. + if spec.Name == "cilium_calls_xdp" { + return nil + } + // Maps prefixed with cilium_calls_ should never be reused by subsequent ELF + // loads and should be migrated unconditionally. + if !strings.HasPrefix(spec.Name, "cilium_calls_") { + return nil + } + } + + dest := file + bpffsPending + + log.WithFields(logrus.Fields{ + logfields.BPFMapName: name, + logfields.BPFMapPath: file, + }).Infof("Re-pinning map with '%s' suffix", bpffsPending) + + if err := os.Remove(dest); err == nil { + log.WithFields(logrus.Fields{ + logfields.BPFMapName: name, + logfields.BPFMapPath: dest, + }).Info("Removed pending pinned map, did the agent die unexpectedly?") + } + + // Atomically re-pin the map to its new path. + if err := pinned.Pin(dest); err != nil { + return err + } + + return nil +} + +// FinalizeMap opens the ':pending' Map pin of the given named Map from bpffs. +// If the given map is not found in bpffs, returns nil. 
+// If revert is true, the map will be re-pinned back to its initial location.
+// If revert is false, the map will be unpinned.
+func FinalizeMap(bpffsPath, name string, revert bool) error {
+	// Attempt to open a 'pending' Map pin.
+	file := filepath.Join(bpffsPath, name+bpffsPending)
+	pending, err := ebpf.LoadPinnedMap(file, nil)
+
+	// Given map was not pending recreation, nothing to do.
+	if errors.Is(err, unix.ENOENT) {
+		return nil
+	}
+
+	if err != nil {
+		return fmt.Errorf("unable to open pinned map at path %s: %v", file, err)
+	}
+
+	// Pending Map was found on bpffs and needs to be reverted.
+	if revert {
+		dest := filepath.Join(bpffsPath, name)
+		log.WithFields(logrus.Fields{
+			logfields.BPFMapPath: dest,
+			logfields.BPFMapName: name,
+		}).Infof("Repinning without '%s' suffix after failed migration", bpffsPending)
+
+		if err := os.Remove(dest); err == nil {
+			log.WithFields(logrus.Fields{
+				logfields.BPFMapName: name,
+				logfields.BPFMapPath: dest,
+			}).Warn("Removed new pinned map after failed migration")
+		}
+
+		// Atomically re-pin the map to its original path.
+		if err := pending.Pin(dest); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	log.WithFields(logrus.Fields{
+		logfields.BPFMapPath: file,
+		logfields.BPFMapName: name,
+	}).Info("Unpinning map after successful recreation")
+
+	// Pending Map found on bpffs and its replacement was successfully loaded.
+	// Unpin the old map since it no longer needs to be interacted with from userspace.
+	return pending.Unpin()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/bpfmap.go b/vendor/github.com/cilium/cilium/pkg/bpf/bpfmap.go
new file mode 100644
index 0000000000..61a0b89f24
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/bpf/bpfmap.go
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package bpf
+
+import "github.com/cilium/cilium/pkg/hive/cell"
+
+// BpfMap defines the base interface every BPF map needs to implement.
+//
+// Its main purpose is to register a BPF map via value group `bpf-maps`. See [MapOut].
+type BpfMap interface{}
+
+// MapOut ensures that maps are created before the datapath loader
+// is invoked.
+type MapOut[T any] struct {
+	cell.Out
+
+	Map    T
+	BpfMap BpfMap `group:"bpf-maps"`
+}
+
+func NewMapOut[T any](m T) MapOut[T] {
+	return MapOut[T]{Map: m, BpfMap: m}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/collection.go b/vendor/github.com/cilium/cilium/pkg/bpf/collection.go
new file mode 100644
index 0000000000..ea9f955c92
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/bpf/collection.go
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package bpf
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/asm"
+	"github.com/cilium/ebpf/btf"
+)
+
+const globalDataMap = ".rodata.config"
+
+// LoadCollectionSpec loads the eBPF ELF at the given path and parses it into
+// a CollectionSpec. This spec is only a blueprint of the contents of the ELF
+// and does not represent any live resources that have been loaded into the
+// kernel.
+//
+// This is a wrapper around ebpf.LoadCollectionSpec that parses legacy iproute2
+// bpf_elf_map definitions (only used for prog_arrays at the time of writing)
+// and assigns tail calls annotated with `__section_tail` macros to their
+// intended maps and slots.
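+//
+// A minimal usage sketch (the object path is illustrative):
+//
+//	spec, err := LoadCollectionSpec("bpf_host.o")
+//	if err != nil {
+//		return err
+//	}
+//	// spec is only a blueprint; nothing has been loaded into the kernel yet.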
+func LoadCollectionSpec(path string) (*ebpf.CollectionSpec, error) { + spec, err := ebpf.LoadCollectionSpec(path) + if err != nil { + return nil, err + } + + if err := iproute2Compat(spec); err != nil { + return nil, err + } + + if err := classifyProgramTypes(spec); err != nil { + return nil, err + } + + return spec, nil +} + +// iproute2Compat parses the Extra field of each MapSpec in the CollectionSpec. +// This extra portion is present in legacy bpf_elf_map definitions and must be +// handled before the map can be loaded into the kernel. +// +// It parses the ELF section name of each ProgramSpec to extract any map/slot +// mappings for prog arrays used as tail call maps. The spec's programs are then +// inserted into the appropriate map and slot. +// +// TODO(timo): Remove when bpf_elf_map map definitions are no longer used after +// moving away from iproute2+libbpf. +func iproute2Compat(spec *ebpf.CollectionSpec) error { + // Parse legacy iproute2 u32 id and pinning fields. + maps := make(map[uint32]*ebpf.MapSpec) + for _, m := range spec.Maps { + if m.Extra != nil && m.Extra.Len() > 0 { + tail := struct { + ID uint32 + Pinning uint32 + _ uint64 // inner_id + inner_idx + }{} + if err := binary.Read(m.Extra, spec.ByteOrder, &tail); err != nil { + return fmt.Errorf("reading iproute2 map definition: %w", err) + } + + if tail.Pinning > 0 { + m.Pinning = ebpf.PinByName + } + + // Index maps by their iproute2 .id if any, so X/Y ELF section names can + // be matched against them. + if tail.ID != 0 { + if m2 := maps[tail.ID]; m2 != nil { + return fmt.Errorf("maps %s and %s have duplicate iproute2 map ID %d", m.Name, m2.Name, tail.ID) + } + maps[tail.ID] = m + } + } + } + + for n, p := range spec.Programs { + // Parse the program's section name to determine which prog array and slot it + // needs to be inserted into. For example, a section name of '2/14' means to + // insert into the map with the .id field of 2 at index 14. + // Uses %v to automatically detect slot's mathematical base, since they can + // appear either in dec or hex, e.g. 1/0x0515. + var id, slot uint32 + if _, err := fmt.Sscanf(p.SectionName, "%d/%v", &id, &slot); err == nil { + // Assign the prog name and slot to the map with the iproute2 .id obtained + // from the program's section name. The lib will load the ProgramSpecs + // and insert the corresponding Programs into the prog array at load time. + m := maps[id] + if m == nil { + return fmt.Errorf("no map with iproute2 map .id %d", id) + } + m.Contents = append(maps[id].Contents, ebpf.MapKV{Key: slot, Value: n}) + } + } + + return nil +} + +// LoadCollection loads the given spec into the kernel with the specified opts. +// +// The value given in ProgramOptions.LogSize is used as the starting point for +// sizing the verifier's log buffer and defaults to 4MiB. On each retry, the +// log buffer quadruples in size, for a total of 5 attempts. If that proves +// insufficient, a truncated ebpf.VerifierError is returned. +// +// Any maps marked as pinned in the spec are automatically loaded from the path +// given in opts.Maps.PinPath and will be used instead of creating new ones. +// MapSpecs that differ (type/key/value/max/flags) from their pinned versions +// will result in an ebpf.ErrMapIncompatible here and the map must be removed +// before loading the CollectionSpec. 
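+//
+// A typical call might look as follows (the options are illustrative):
+//
+//	coll, err := LoadCollection(spec, ebpf.CollectionOptions{
+//		Maps: ebpf.MapOptions{PinPath: TCGlobalsPath()},
+//	})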
+func LoadCollection(spec *ebpf.CollectionSpec, opts ebpf.CollectionOptions) (*ebpf.Collection, error) { + if spec == nil { + return nil, errors.New("can't load nil CollectionSpec") + } + + // Copy spec so the modifications below don't affect the input parameter, + // allowing the spec to be safely re-used by the caller. + spec = spec.Copy() + + if err := inlineGlobalData(spec); err != nil { + return nil, fmt.Errorf("inlining global data: %w", err) + } + + // Set initial size of verifier log buffer. + // + // Up until kernel 5.1, the maximum log size is (2^24)-1. In 5.2, this was + // increased to (2^30)-1 by 7a9f5c65abcc ("bpf: increase verifier log limit"). + // + // The default value of (2^22)-1 was chosen to be large enough to fit the log + // of most Cilium programs, while falling just within the 5.1 maximum size in + // one of the steps of the multiplication loop below. Without the -1, it would + // overshoot the cap to 2^24, making e.g. verifier tests unable to load the + // program if the previous size (2^22) was too small to fit the log. + if opts.Programs.LogSize == 0 { + opts.Programs.LogSize = 4_194_303 + } + + attempt := 1 + for { + coll, err := ebpf.NewCollectionWithOptions(spec, opts) + if err == nil { + return coll, nil + } + + // Bump LogSize and retry if there's a truncated VerifierError. + var ve *ebpf.VerifierError + if errors.As(err, &ve) && ve.Truncated { + if attempt >= 5 { + return nil, fmt.Errorf("%d-byte truncated verifier log after %d attempts: %w", opts.Programs.LogSize, attempt, err) + } + + // Retry with non-zero log level to avoid retrying with log disabled. + if opts.Programs.LogLevel == 0 { + opts.Programs.LogLevel = ebpf.LogLevelBranch + } + + opts.Programs.LogSize *= 4 + + attempt++ + + continue + } + + // Not a truncated VerifierError. + return nil, err + } +} + +// classifyProgramTypes sets the type of ProgramSpecs which the library cannot +// automatically classify due to them being in unrecognized ELF sections. Only +// programs of type UnspecifiedProgram are modified. +// +// Cilium uses the iproute2 X/Y section name convention for assigning programs +// to prog array slots, which is also not supported. +// +// TODO(timo): When iproute2 is no longer used for any loading, tail call progs +// can receive proper prefixes. +func classifyProgramTypes(spec *ebpf.CollectionSpec) error { + var t ebpf.ProgramType + for name, p := range spec.Programs { + // If the loader was able to classify a program, go with the verdict. + if p.Type != ebpf.UnspecifiedProgram { + t = p.Type + break + } + + // Assign a program type based on the first recognized function name. + switch name { + // bpf_xdp.c + case "cil_xdp_entry": + t = ebpf.XDP + case + // bpf_lxc.c + "cil_from_container", "cil_to_container", + // bpf_host.c + "cil_from_netdev", "cil_from_host", "cil_to_netdev", "cil_to_host", + // bpf_network.c + "cil_from_network", + // bpf_overlay.c + "cil_to_overlay", "cil_from_overlay": + t = ebpf.SchedCLS + default: + continue + } + + break + } + + for _, p := range spec.Programs { + if p.Type == ebpf.UnspecifiedProgram { + p.Type = t + } + } + + if t == ebpf.UnspecifiedProgram { + return errors.New("unable to classify program types") + } + + return nil +} + +// inlineGlobalData replaces all map loads from a global data section with +// immediate dword loads, effectively performing those map lookups in the +// loader. This is done for compatibility with kernels that don't support +// global data maps yet. 
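+//
+// Conceptually (illustrative pseudo-instructions, not real asm output), a
+// lookup such as
+//
+//	r1 = map[.rodata.config][off]   // load from the global data map
+//
+// is rewritten at load time to
+//
+//	r1 = 0x2a ll                    // 64-bit immediate load of the value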
+// +// This code interacts with the DECLARE_CONFIG macro in the BPF C code base. +func inlineGlobalData(spec *ebpf.CollectionSpec) error { + vars, err := globalData(spec) + if err != nil { + return err + } + if vars == nil { + // No static data, nothing to replace. + return nil + } + + for _, prog := range spec.Programs { + for i, ins := range prog.Instructions { + if !ins.IsLoadFromMap() || ins.Src != asm.PseudoMapValue { + continue + } + + if ins.Reference() != globalDataMap { + return fmt.Errorf("global constants must be in %s, but found reference to %s", globalDataMap, ins.Reference()) + } + + // Get the offset of the read within the target map, + // stored in the 32 most-significant bits of Constant. + // Equivalent to Instruction.mapOffset(). + off := uint32(uint64(ins.Constant) >> 32) + + // Look up the value of the variable stored at the Datasec offset pointed + // at by the instruction. + value, ok := vars[off] + if !ok { + return fmt.Errorf("no global constant found in %s at offset %d", globalDataMap, off) + } + + imm := spec.ByteOrder.Uint64(value) + + // Replace the map load with an immediate load. Must be a dword load + // to match the instruction width of a map load. + r := asm.LoadImm(ins.Dst, int64(imm), asm.DWord) + + // Preserve metadata of the original instruction. Otherwise, a program's + // first instruction could be stripped of its func_info or Symbol + // (function start) annotations. + r.Metadata = ins.Metadata + + prog.Instructions[i] = r + } + } + + return nil +} + +type varOffsets map[uint32][]byte + +// globalData gets the contents of the first entry in the global data map +// and removes it from the spec to prevent it from being created in the kernel. +func globalData(spec *ebpf.CollectionSpec) (varOffsets, error) { + dm := spec.Maps[globalDataMap] + if dm == nil { + return nil, nil + } + + if dl := len(dm.Contents); dl != 1 { + return nil, fmt.Errorf("expected one key in %s, found %d", globalDataMap, dl) + } + + ds, ok := dm.Value.(*btf.Datasec) + if !ok { + return nil, fmt.Errorf("no BTF datasec found for %s", globalDataMap) + } + + data, ok := (dm.Contents[0].Value).([]byte) + if !ok { + return nil, fmt.Errorf("expected %s value to be a byte slice, got: %T", + globalDataMap, dm.Contents[0].Value) + } + + // Slice up the binary contents of the global data map according to the + // variables described in its Datasec. + out := make(varOffsets) + for _, vsi := range ds.Vars { + if vsi.Size > 8 { + return nil, fmt.Errorf("variables larger than 8 bytes are not supported (got %d)", vsi.Size) + } + + if _, ok := out[vsi.Offset]; ok { + return nil, fmt.Errorf("duplicate VarSecInfo for offset %d", vsi.Offset) + } + + // Allocate a fixed slice of 8 bytes so it can be used to store in an imm64 + // instruction later using ByteOrder.Uint64(). + v := make([]byte, 8) + copy(v, data[vsi.Offset:vsi.Offset+vsi.Size]) + + // Emit the variable's value by its offset in the datasec. + out[vsi.Offset] = v + } + + // Remove the map definition to skip loading it into the kernel. + delete(spec.Maps, globalDataMap) + + return out, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/doc.go b/vendor/github.com/cilium/cilium/pkg/bpf/doc.go new file mode 100644 index 0000000000..74ab2e5698 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/bpf/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package bpf provides functions that allow golang programs to interact with +// bpf maps. 
+// +groupName=pkg +package bpf diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/endpoint.go b/vendor/github.com/cilium/cilium/pkg/bpf/endpoint.go new file mode 100644 index 0000000000..3c73fa3bf9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/bpf/endpoint.go @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package bpf + +import ( + "fmt" + "net" + + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" + ippkg "github.com/cilium/cilium/pkg/ip" + "github.com/cilium/cilium/pkg/types" +) + +// Must be in sync with ENDPOINT_KEY_* in +const ( + EndpointKeyIPv4 uint8 = 1 + EndpointKeyIPv6 uint8 = 2 +) + +// EndpointKey represents the key value of the endpoints BPF map +// +// Must be in sync with struct endpoint_key in +type EndpointKey struct { + // represents both IPv6 and IPv4 (in the lowest four bytes) + IP types.IPv6 `align:"$union0"` + Family uint8 `align:"family"` + Key uint8 `align:"key"` + ClusterID uint16 `align:"cluster_id"` +} + +// NewEndpointKey returns an EndpointKey based on the provided IP address. The +// address family is automatically detected. +func NewEndpointKey(ip net.IP, clusterID uint16) EndpointKey { + result := EndpointKey{} + + if ip4 := ip.To4(); ip4 != nil { + result.Family = EndpointKeyIPv4 + copy(result.IP[:], ip4) + } else { + result.Family = EndpointKeyIPv6 + copy(result.IP[:], ip) + } + result.Key = 0 + result.ClusterID = clusterID + + return result +} + +// ToIP converts the EndpointKey into a net.IP structure. +func (k EndpointKey) ToIP() net.IP { + switch k.Family { + case EndpointKeyIPv4: + return k.IP[:4] + case EndpointKeyIPv6: + return k.IP[:] + } + return nil +} + +// String provides a string representation of the EndpointKey. +func (k EndpointKey) String() string { + if ip := k.ToIP(); ip != nil { + addrCluster := cmtypes.AddrClusterFrom( + ippkg.MustAddrFromIP(ip), + uint32(k.ClusterID), + ) + return addrCluster.String() + ":" + fmt.Sprintf("%d", k.Key) + } + return "nil" +} diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/events.go b/vendor/github.com/cilium/cilium/pkg/bpf/events.go new file mode 100644 index 0000000000..73e217df8d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/bpf/events.go @@ -0,0 +1,286 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package bpf + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + + "github.com/cilium/cilium/pkg/container" + "github.com/cilium/cilium/pkg/controller" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/time" +) + +// Action describes an action for map buffer events. +type Action uint8 + +const ( + // MapUpdate describes a map.Update event. + MapUpdate Action = iota + // MapDelete describes a map.Delete event. + MapDelete + // MapDeleteAll describes a map.DeleteAll event which is aggregated into a single event + // to minimize memory and subscription buffer usage. + MapDeleteAll +) + +var bpfEventBufferGCControllerGroup = controller.NewGroup("bpf-event-buffer-gc") + +// String returns a string representation of an Action. +func (e Action) String() string { + switch e { + case MapUpdate: + return "update" + case MapDelete: + return "delete" + case MapDeleteAll: + return "delete-all" + default: + return "unknown" + } +} + +// Event contains data about a bpf operation event. 
+type Event struct {
+	Timestamp time.Time
+	action    Action
+	cacheEntry
+}
+
+// GetAction returns the event action string.
+func (e *Event) GetAction() string {
+	return e.action.String()
+}
+
+// GetKey returns the string representation of an event key.
+func (e Event) GetKey() string {
+	if e.cacheEntry.Key == nil {
+		return ""
+	}
+	return e.cacheEntry.Key.String()
+}
+
+// GetValue returns the string representation of an event value.
+// Nil values (such as with deletes) are returned as a canonical
+// string representation.
+func (e Event) GetValue() string {
+	if e.cacheEntry.Value == nil {
+		return ""
+	}
+	return e.cacheEntry.Value.String()
+}
+
+// GetLastError returns the last error for an event.
+func (e Event) GetLastError() error {
+	return e.cacheEntry.LastError
+}
+
+// GetDesiredAction returns the desired action enum for an event.
+func (e Event) GetDesiredAction() DesiredAction {
+	return e.cacheEntry.DesiredAction
+}
+
+func (m *Map) initEventsBuffer(maxSize int, eventsTTL time.Duration) {
+	b := &eventsBuffer{
+		buffer:   container.NewRingBuffer(maxSize),
+		eventTTL: eventsTTL,
+	}
+	if b.eventTTL > 0 {
+		m.scopedLogger().Debug("starting bpf map event buffer GC controller")
+		mapControllers.UpdateController(
+			fmt.Sprintf("bpf-event-buffer-gc-%s", m.name),
+			controller.ControllerParams{
+				Group: bpfEventBufferGCControllerGroup,
+				DoFunc: func(_ context.Context) error {
+					m.scopedLogger().Debugf("clearing bpf map events older than %s", b.eventTTL)
+					b.buffer.Compact(func(e interface{}) bool {
+						event, ok := e.(*Event)
+						if !ok {
+							log.WithError(wrongObjTypeErr(e)).Error("Failed to compact the event buffer")
+							return false
+						}
+						return time.Since(event.Timestamp) < b.eventTTL
+					})
+					return nil
+				},
+				RunInterval: b.eventTTL,
+			},
+		)
+	}
+	m.events = b
+}
+
+// eventsBuffer stores a buffer of events for auditing and debugging
+// purposes.
+type eventsBuffer struct {
+	buffer        *container.RingBuffer
+	eventTTL      time.Duration
+	subsLock      lock.RWMutex
+	subscriptions []*Handle
+}
+
+// Handle allows for handling event streams safely outside of this package.
+// The key design consideration for event streaming is that it is non-blocking.
+// The eventsBuffer takes care of closing handles when their consumer is not reading
+// off the buffer (or is not reading off it fast enough).
+type Handle struct {
+	c      chan *Event
+	closed atomic.Bool
+	closer *sync.Once
+	err    error
+}
+
+// C returns a read-only channel of the Handle's subscription events. The
+// channel is closed via the handle.Close() function.
+func (h *Handle) C() <-chan *Event {
+	return h.c // return read only channel to prevent closing outside of Close(...).
+}
+
+// Close safely closes a handle.
+func (h *Handle) Close() {
+	h.close(nil)
+}
+
+func (h *Handle) close(err error) {
+	h.closer.Do(func() {
+		close(h.c)
+		h.err = err
+		h.closed.Store(true)
+	})
+}
+
+func (h *Handle) isClosed() bool {
+	return h.closed.Load()
+}
+
+func (h *Handle) isFull() bool {
+	return len(h.c) >= cap(h.c)
+}
+
+// This configures how big the buffers are for the channels used for streaming
+// events from the eventsBuffer.
+//
+// To prevent blocking bpf.Map operations, subscribed events are buffered per client handle.
+// How fast subscribers will need to process events will depend on the event throughput.
+// In this case, our throughput is not expected to exceed 100 events a second.
+// Therefore the consumer will have 10ms to process each event. The channel is also
+// given a constant buffer size in case events arrive at once (i.e. all 100 events
+// arriving at the top of the second).
+//
+// NOTE: Although using timers/timed-contexts seems like an obvious choice for this use case,
+// the time.After implementation actually uses a large amount of memory. To reduce memory spikes
+// in high throughput cases, we instead just use a sufficiently buffered channel.
+const (
+	eventSubChanBufferSize = 32
+	maxConcurrentEventSubs = 32
+)
+
+func (eb *eventsBuffer) hasSubCapacity() bool {
+	eb.subsLock.RLock()
+	defer eb.subsLock.RUnlock()
+	return len(eb.subscriptions) <= maxConcurrentEventSubs
+}
+
+func (eb *eventsBuffer) dumpAndSubscribe(callback EventCallbackFunc, follow bool) (*Handle, error) {
+	if follow && !eb.hasSubCapacity() {
+		return nil, fmt.Errorf("exceeded max number of concurrent map event subscriptions %d", maxConcurrentEventSubs)
+	}
+
+	if callback != nil {
+		eb.dumpWithCallback(callback)
+	}
+
+	if !follow {
+		return nil, nil
+	}
+
+	h := &Handle{
+		c:      make(chan *Event, eventSubChanBufferSize),
+		closer: &sync.Once{},
+	}
+
+	eb.subsLock.Lock()
+	defer eb.subsLock.Unlock()
+	eb.subscriptions = append(eb.subscriptions, h)
+	return h, nil
+}
+
+// DumpAndSubscribe dumps the existing buffer if callback is not nil, followed
+// by creating a subscription to the map's events buffer and returning the handle.
+// These actions are done together so as to prevent possible missed events between
+// the callback handoff and subscription handle creation.
+func (m *Map) DumpAndSubscribe(callback EventCallbackFunc, follow bool) (*Handle, error) {
+	// note: we have to hold rlock for the duration of this to prevent missed events between dump and sub.
+	// dumpAndSubscribe maintains its own write-lock for updating subscribers.
+	m.lock.RLock()
+	defer m.lock.RUnlock()
+	if !m.eventsBufferEnabled {
+		return nil, fmt.Errorf("map events not enabled for map %q", m.name)
+	}
+	return m.events.dumpAndSubscribe(callback, follow)
+}
+
+func (m *Map) IsEventsEnabled() bool {
+	return m.eventsBufferEnabled
+}
+
+func (eb *eventsBuffer) add(e *Event) {
+	eb.buffer.Add(e)
+	var activeSubs []*Handle
+	activeSubsLock := &lock.Mutex{}
+	wg := &sync.WaitGroup{}
+	for i, sub := range eb.subscriptions {
+		if sub.isClosed() { // sub will be removed.
+			continue
+		}
+		wg.Add(1)
+		go func(sub *Handle, i int) {
+			defer wg.Done()
+			if sub.isFull() {
+				err := fmt.Errorf("subscription channel buffer was full, subscription closed")
+				log.WithError(err).Warnf("subscription channel buffer %d was full, closing subscription", i)
+				sub.close(err)
+			} else {
+				sub.c <- e
+				activeSubsLock.Lock()
+				activeSubs = append(activeSubs, sub)
+				activeSubsLock.Unlock()
+			}
+		}(sub, i)
+	}
+	wg.Wait()
+	eb.subsLock.Lock()
+	defer eb.subsLock.Unlock()
+	eb.subscriptions = activeSubs
+}
+
+func wrongObjTypeErr(i any) error {
+	return fmt.Errorf("BUG: wrong object type in event ring buffer: %T", i)
+}
+
+func (eb *eventsBuffer) eventIsValid(e interface{}) bool {
+	event, ok := e.(*Event)
+	if !ok {
+		log.WithError(wrongObjTypeErr(e)).Error("Could not dump contents of events buffer")
+		return false
+	}
+	return eb.eventTTL == 0 || time.Since(event.Timestamp) <= eb.eventTTL
+}
+
+// EventCallbackFunc is used to dump events from an event buffer.
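+//
+// Illustrative only: dump the current buffer, then follow live events
+// (m is some *Map with the events buffer enabled):
+//
+//	handle, err := m.DumpAndSubscribe(func(e *Event) {
+//		fmt.Println(e.GetAction(), e.GetKey())
+//	}, true)
+//	if err == nil {
+//		defer handle.Close()
+//		for e := range handle.C() {
+//			_ = e // process live event
+//		}
+//	}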
+type EventCallbackFunc func(*Event)
+
+func (eb *eventsBuffer) dumpWithCallback(callback EventCallbackFunc) {
+	eb.buffer.IterateValid(eb.eventIsValid, func(e interface{}) {
+		event, ok := e.(*Event)
+		if !ok {
+			log.WithError(wrongObjTypeErr(e)).Error("Could not dump contents of events buffer")
+			return
+		}
+		callback(event)
+	})
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/link.go b/vendor/github.com/cilium/cilium/pkg/bpf/link.go
new file mode 100644
index 0000000000..7749b4618a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/bpf/link.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package bpf
+
+import (
+	"fmt"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/link"
+)
+
+// UpdateLink loads a pinned bpf_link at the given pin path and updates its
+// program.
+//
+// Returns [os.ErrNotExist] if the pin is not found.
+//
+// Updating the link can fail if it is defunct (the hook it points to no longer
+// exists).
+func UpdateLink(pin string, prog *ebpf.Program) error {
+	l, err := link.LoadPinnedLink(pin, &ebpf.LoadPinOptions{})
+	if err != nil {
+		return fmt.Errorf("opening pinned link %s: %w", pin, err)
+	}
+	defer l.Close()
+
+	if err = l.Update(prog); err != nil {
+		return fmt.Errorf("updating link %s: %w", pin, err)
+	}
+	return nil
+}
+
+// UnpinLink loads and unpins a bpf_link at the given pin path.
+//
+// Returns [os.ErrNotExist] if the pin is not found.
+func UnpinLink(pin string) error {
+	l, err := link.LoadPinnedLink(pin, &ebpf.LoadPinOptions{})
+	if err != nil {
+		return fmt.Errorf("opening pinned link %s: %w", pin, err)
+	}
+	defer l.Close()
+
+	if err := l.Unpin(); err != nil {
+		return fmt.Errorf("unpinning link %s: %w", pin, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/map.go b/vendor/github.com/cilium/cilium/pkg/bpf/map.go
new file mode 100644
index 0000000000..6d59883c4f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/bpf/map.go
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package bpf
+
+import (
+	"regexp"
+
+	"github.com/cilium/cilium/pkg/controller"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+const (
+	// maxSyncErrors is the maximum number of consecutive errors syncing before
+	// the controller bails out
+	maxSyncErrors = 512
+
+	// errorResolverSchedulerMinInterval is the minimum interval for the
+	// error resolver to be scheduled. This minimum interval ensures we do not
+	// overschedule if a large number of updates fail in a row.
+	errorResolverSchedulerMinInterval = 5 * time.Second
+
+	// errorResolverSchedulerDelay is the delay to update the controller
+	// after determination that a run is needed. The delay allows scheduling
+	// the resolver after a series of updates has failed.
+	errorResolverSchedulerDelay = 200 * time.Millisecond
+)
+
+var (
+	mapControllers = controller.NewManager()
+)
+
+// DesiredAction is the action to be performed on the BPF map
+type DesiredAction uint8
+
+const (
+	// OK indicates that no further action is required and the entry is in
+	// sync
+	OK DesiredAction = iota
+
+	// Insert indicates that the entry needs to be created or updated
+	Insert
+
+	// Delete indicates that the entry needs to be deleted
+	Delete
+)
+
+func (d DesiredAction) String() string {
+	switch d {
+	case OK:
+		return "sync"
+	case Insert:
+		return "to-be-inserted"
+	case Delete:
+		return "to-be-deleted"
+	default:
+		return "unknown"
+	}
+}
+
+var commonNameRegexps = []*regexp.Regexp{
+	regexp.MustCompile(`^(cilium_)(.+)_reserved_[0-9]+$`),
+	regexp.MustCompile(`^(cilium_)(.+)_netdev_ns_[0-9]+$`),
+	regexp.MustCompile(`^(cilium_)(.+)_overlay_[0-9]+$`),
+	regexp.MustCompile(`^(cilium_)(.+)_[0-9]+$`),
+	regexp.MustCompile(`^(cilium_)(.+)+$`),
+}
+
+func extractCommonName(name string) string {
+	for _, r := range commonNameRegexps {
+		if replaced := r.ReplaceAllString(name, `$2`); replaced != name {
+			return replaced
+		}
+	}
+
+	return name
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/map_linux.go b/vendor/github.com/cilium/cilium/pkg/bpf/map_linux.go
new file mode 100644
index 0000000000..4e44e61c39
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/bpf/map_linux.go
@@ -0,0 +1,1206 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build linux
+
+package bpf
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io/fs"
+	"os"
+	"path"
+	"reflect"
+	"strings"
+
+	"github.com/cilium/ebpf"
+	"github.com/sirupsen/logrus"
+
+	"github.com/cilium/cilium/api/v1/models"
+	"github.com/cilium/cilium/pkg/controller"
+	"github.com/cilium/cilium/pkg/lock"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	"github.com/cilium/cilium/pkg/metrics"
+	"github.com/cilium/cilium/pkg/option"
+	"github.com/cilium/cilium/pkg/spanstat"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+var (
+	// ErrMaxLookup is returned when the maximum number of map element lookups has
+	// been reached.
+	ErrMaxLookup = errors.New("maximum number of lookups reached")
+
+	bpfMapSyncControllerGroup = controller.NewGroup("bpf-map-sync")
+)
+
+type MapKey interface {
+	fmt.Stringer
+
+	// New must return a pointer to a new MapKey.
+	New() MapKey
+}
+
+type MapValue interface {
+	fmt.Stringer
+
+	// New must return a pointer to a new MapValue.
+	New() MapValue
+}
+
+type cacheEntry struct {
+	Key   MapKey
+	Value MapValue
+
+	DesiredAction DesiredAction
+	LastError     error
+}
+
+type Map struct {
+	m *ebpf.Map
+	// spec will be nil after the map has been created
+	spec *ebpf.MapSpec
+
+	key   MapKey
+	value MapValue
+
+	name string
+	path string
+	lock lock.RWMutex
+
+	// cachedCommonName is the common portion of the name excluding any
+	// endpoint ID
+	cachedCommonName string
+
+	// enableSync is true when synchronization retries have been enabled.
+	enableSync bool
+
+	// withValueCache is true when the map cache has been enabled
+	withValueCache bool
+
+	// cache contains the map entries, stored as key/value when the map cache
+	// is enabled, or as key-only when the pressure metric is enabled
+	cache map[string]*cacheEntry
+
+	// errorResolverLastScheduled is the timestamp when the error resolver
+	// was last scheduled
+	errorResolverLastScheduled time.Time
+
+	// outstandingErrors states whether there are outstanding errors that
+	// occurred while syncing an entry with the kernel and need to be resolved.
+	// This variable exists to avoid iterating over the full cache to check if
+	// reconciliation is necessary, but it is possible that it gets out of sync
+	// if an error is automatically resolved while performing a subsequent
+	// Update/Delete operation on the same key.
+	outstandingErrors bool
+
+	// pressureGauge is a metric that tracks the pressure on this map
+	pressureGauge *metrics.GaugeWithThreshold
+
+	// eventsBufferEnabled is true when the events buffer is enabled.
+	eventsBufferEnabled bool
+
+	// events is an optional buffer which stores the last n bpf map events.
+	events *eventsBuffer
+
+	// group is the metric group name for this map, it classifies maps of the same
+	// type that share the same metric group.
+	group string
+}
+
+func (m *Map) Type() ebpf.MapType {
+	if m.m != nil {
+		return m.m.Type()
+	}
+	if m.spec != nil {
+		return m.spec.Type
+	}
+	return ebpf.UnspecifiedMap
+}
+
+func (m *Map) KeySize() uint32 {
+	if m.m != nil {
+		return m.m.KeySize()
+	}
+	if m.spec != nil {
+		return m.spec.KeySize
+	}
+	return 0
+}
+
+func (m *Map) ValueSize() uint32 {
+	if m.m != nil {
+		return m.m.ValueSize()
+	}
+	if m.spec != nil {
+		return m.spec.ValueSize
+	}
+	return 0
+}
+
+func (m *Map) MaxEntries() uint32 {
+	if m.m != nil {
+		return m.m.MaxEntries()
+	}
+	if m.spec != nil {
+		return m.spec.MaxEntries
+	}
+	return 0
+}
+
+func (m *Map) Flags() uint32 {
+	if m.m != nil {
+		return m.m.Flags()
+	}
+	if m.spec != nil {
+		return m.spec.Flags
+	}
+	return 0
+}
+
+func (m *Map) updateMetrics() {
+	if m.group == "" {
+		return
+	}
+	metrics.UpdateMapCapacity(m.group, m.MaxEntries())
+}
+
+// NewMap creates a new Map instance - an object representing a BPF map
+func NewMap(name string, mapType ebpf.MapType, mapKey MapKey, mapValue MapValue,
+	maxEntries int, flags uint32) *Map {
+
+	keySize := reflect.TypeOf(mapKey).Elem().Size()
+	valueSize := reflect.TypeOf(mapValue).Elem().Size()
+
+	return &Map{
+		spec: &ebpf.MapSpec{
+			Type:       mapType,
+			Name:       path.Base(name),
+			KeySize:    uint32(keySize),
+			ValueSize:  uint32(valueSize),
+			MaxEntries: uint32(maxEntries),
+			Flags:      flags,
+		},
+		name:  path.Base(name),
+		key:   mapKey,
+		value: mapValue,
+		group: name,
+	}
+}
+
+// NewMapWithInnerSpec creates a new Map instance - an object representing a
+// BPF map - with the given inner map spec.
+func NewMapWithInnerSpec(name string, mapType ebpf.MapType, mapKey MapKey, mapValue MapValue,
+	maxEntries int, flags uint32, innerSpec *ebpf.MapSpec) *Map {
+
+	keySize := reflect.TypeOf(mapKey).Elem().Size()
+	valueSize := reflect.TypeOf(mapValue).Elem().Size()
+
+	return &Map{
+		spec: &ebpf.MapSpec{
+			Type:       mapType,
+			Name:       path.Base(name),
+			KeySize:    uint32(keySize),
+			ValueSize:  uint32(valueSize),
+			MaxEntries: uint32(maxEntries),
+			Flags:      flags,
+			InnerMap:   innerSpec,
+		},
+		name:  path.Base(name),
+		key:   mapKey,
+		value: mapValue,
+	}
+}
+
+func (m *Map) commonName() string {
+	if m.cachedCommonName != "" {
+		return m.cachedCommonName
+	}
+
+	m.cachedCommonName = extractCommonName(m.name)
+	return m.cachedCommonName
+}
+
+func (m *Map) NonPrefixedName() string {
+	return strings.TrimPrefix(m.name, metrics.Namespace+"_")
+}
+
+// scheduleErrorResolver schedules a periodic resolver controller that scans
+// all BPF map caches for unresolved errors and attempts to resolve them. If
+// resolution fails, the controller is rescheduled in an expedited manner
+// with an exponential back-off.
+//
+// m.lock must be held for writing
+func (m *Map) scheduleErrorResolver() {
+	m.outstandingErrors = true
+
+	if time.Since(m.errorResolverLastScheduled) <= errorResolverSchedulerMinInterval {
+		return
+	}
+
+	m.errorResolverLastScheduled = time.Now()
+
+	go func() {
+		time.Sleep(errorResolverSchedulerDelay)
+		mapControllers.UpdateController(m.controllerName(),
+			controller.ControllerParams{
+				Group:       bpfMapSyncControllerGroup,
+				DoFunc:      m.resolveErrors,
+				RunInterval: errorResolverSchedulerMinInterval,
+			},
+		)
+	}()
+}
+
+// WithCache enables use of a cache. This will store all entries inserted from
+// user space in a local cache (map) and will indicate the status of each
+// individual entry.
+func (m *Map) WithCache() *Map {
+	if m.cache == nil {
+		m.cache = map[string]*cacheEntry{}
+	}
+	m.withValueCache = true
+	m.enableSync = true
+	return m
+}
+
+// WithEvents enables use of the event buffer, if it is enabled in the given
+// config. This stores all map events (i.e. add/update/delete) in a bounded
+// event buffer. If eventTTL is not zero, then events that are older than the
+// TTL will periodically be removed from the buffer.
+// Enabling events will use approximately 100MB of memory for every million
+// entries of capacity in maxSize.
+//
+// TODO: The IPCache map has many periodic update events added by a controller for entries such as the 0.0.0.0/0 range.
+// These fill the event buffer with possibly unnecessary events.
+// We should either provide an option to aggregate these events, ignore them from the ipcache event buffer, or store them in a separate buffer.
+func (m *Map) WithEvents(c option.BPFEventBufferConfig) *Map {
+	if !c.Enabled {
+		return m
+	}
+	m.scopedLogger().WithFields(logrus.Fields{
+		"size": c.MaxSize,
+		"ttl":  c.TTL,
+	}).Debug("enabling events buffer")
+	m.eventsBufferEnabled = true
+	m.initEventsBuffer(c.MaxSize, c.TTL)
+	return m
+}
+
+func (m *Map) WithGroupName(group string) *Map {
+	m.group = group
+	return m
+}
+
+// WithPressureMetricThreshold enables the tracking of a metric that measures
+// the pressure of this map. This metric is only reported if over the
+// threshold.
+func (m *Map) WithPressureMetricThreshold(threshold float64) *Map {
+	// When the pressure metric is enabled, we keep track of map keys in cache
+	if m.cache == nil {
+		m.cache = map[string]*cacheEntry{}
+	}
+
+	m.pressureGauge = metrics.NewBPFMapPressureGauge(m.NonPrefixedName(), threshold)
+
+	return m
+}
+
+// WithPressureMetric enables tracking and reporting of this map pressure with
+// threshold 0.
+func (m *Map) WithPressureMetric() *Map {
+	return m.WithPressureMetricThreshold(0.0)
+}
+
+// UpdatePressureMetricWithSize updates the map pressure metric using the given map size.
+func (m *Map) UpdatePressureMetricWithSize(size int32) {
+	if m.pressureGauge == nil {
+		return
+	}
+
+	// Do a lazy check of MetricsConfig as it is not available at map static
+	// initialization.
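+	// If pressure metrics were disabled in the config, release the key-only
+	// cache (unless the value cache still needs it) and drop the gauge so that
+	// no further updates are reported.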
+	if !metrics.BPFMapPressure {
+		if !m.withValueCache {
+			m.cache = nil
+		}
+		m.pressureGauge = nil
+		return
+	}
+
+	pvalue := float64(size) / float64(m.MaxEntries())
+	m.pressureGauge.Set(pvalue)
+}
+
+func (m *Map) updatePressureMetric() {
+	// Skip pressure metric gauge updates for LRU maps, as the cache size
+	// does not accurately represent the actual map size.
+	if m.spec != nil && m.spec.Type == ebpf.LRUHash {
+		return
+	}
+	m.UpdatePressureMetricWithSize(int32(len(m.cache)))
+}
+
+func (m *Map) FD() int {
+	return m.m.FD()
+}
+
+// Name returns the basename of this map.
+func (m *Map) Name() string {
+	return m.name
+}
+
+// Path returns the path to this map on the filesystem.
+func (m *Map) Path() (string, error) {
+	if err := m.setPathIfUnset(); err != nil {
+		return "", err
+	}
+
+	return m.path, nil
+}
+
+// Unpin attempts to unpin (remove) the map from the filesystem.
+func (m *Map) Unpin() error {
+	path, err := m.Path()
+	if err != nil {
+		return err
+	}
+
+	return os.RemoveAll(path)
+}
+
+// UnpinIfExists tries to unpin (remove) the map only if it exists.
+func (m *Map) UnpinIfExists() error {
+	found, err := m.exist()
+	if err != nil {
+		return err
+	}
+
+	if !found {
+		return nil
+	}
+
+	return m.Unpin()
+}
+
+func (m *Map) controllerName() string {
+	return fmt.Sprintf("bpf-map-sync-%s", m.name)
+}
+
+// OpenMap opens the map at pinPath.
+func OpenMap(pinPath string, key MapKey, value MapValue) (*Map, error) {
+	if !path.IsAbs(pinPath) {
+		return nil, fmt.Errorf("pinPath must be absolute: %s", pinPath)
+	}
+
+	em, err := ebpf.LoadPinnedMap(pinPath, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	m := &Map{
+		m:     em,
+		name:  path.Base(pinPath),
+		path:  pinPath,
+		key:   key,
+		value: value,
+	}
+
+	m.updateMetrics()
+	registerMap(pinPath, m)
+
+	return m, nil
+}
+
+func (m *Map) setPathIfUnset() error {
+	if m.path == "" {
+		if m.name == "" {
+			return fmt.Errorf("either path or name must be set")
+		}
+
+		m.path = MapPath(m.name)
+	}
+
+	return nil
+}
+
+// Recreate removes any pin at the Map's pin path, recreates and re-pins it.
+func (m *Map) Recreate() error {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	if m.m != nil {
+		return fmt.Errorf("map already open: %s", m.name)
+	}
+
+	if err := m.setPathIfUnset(); err != nil {
+		return err
+	}
+
+	if err := os.Remove(m.path); err != nil && !errors.Is(err, fs.ErrNotExist) {
+		return fmt.Errorf("removing pinned map %s: %w", m.name, err)
+	}
+
+	m.scopedLogger().Infof("Removed map pin at %s, recreating and re-pinning map %s", m.path, m.name)
+
+	return m.openOrCreate(true)
+}
+
+// OpenOrCreate attempts to open the Map, or if it does not yet exist, create
+// the Map. If the existing map's attributes such as map type, key/value size,
+// capacity, etc. do not match the Map's attributes, then the map will be
+// deleted and recreated without any attempt to retain its previous contents.
+// If the map is marked as non-persistent, it will always be recreated.
+//
+// Returns an error if the map could not be opened or created.
+func (m *Map) OpenOrCreate() error {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	return m.openOrCreate(true)
+}
+
+// CreateUnpinned creates the map without pinning it to the file system.
+//
+// TODO(tb): Remove this when all map creation takes MapSpec.
+func (m *Map) CreateUnpinned() error {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	return m.openOrCreate(false)
+}
+
+// Create is similar to OpenOrCreate, but closes the map after creating or
+// opening it.
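+//
+// A minimal usage sketch (illustrative only; exampleKey/exampleValue are
+// placeholders, not types of this package):
+//
+//	m := NewMap("cilium_example", ebpf.Hash, &exampleKey{}, &exampleValue{}, 1024, 0)
+//	if err := m.Create(); err != nil {
+//		log.WithError(err).Error("creating example map")
+//	}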
+func (m *Map) Create() error { + if err := m.OpenOrCreate(); err != nil { + return err + } + return m.Close() +} + +func (m *Map) openOrCreate(pin bool) error { + if m.m != nil { + return nil + } + + if m.spec == nil { + return fmt.Errorf("attempted to create map %s without MapSpec", m.name) + } + + if err := m.setPathIfUnset(); err != nil { + return err + } + + m.spec.Flags |= GetPreAllocateMapFlags(m.spec.Type) + + if m.spec.InnerMap != nil { + m.spec.InnerMap.Flags |= GetPreAllocateMapFlags(m.spec.InnerMap.Type) + } + + if pin { + m.spec.Pinning = ebpf.PinByName + } + + em, err := OpenOrCreateMap(m.spec, path.Dir(m.path)) + if err != nil { + return err + } + + m.updateMetrics() + registerMap(m.path, m) + + // Consume the MapSpec. + m.spec = nil + + // Retain the Map. + m.m = em + + return nil +} + +// Open opens the BPF map. All calls to Open() are serialized due to acquiring +// m.lock +func (m *Map) Open() error { + m.lock.Lock() + defer m.lock.Unlock() + + return m.open() +} + +// open opens the BPF map. It is identical to Open() but should be used when +// m.lock is already held. open() may only be used if m.lock is held for +// writing. +func (m *Map) open() error { + if m.m != nil { + return nil + } + + if err := m.setPathIfUnset(); err != nil { + return err + } + + em, err := ebpf.LoadPinnedMap(m.path, nil) + if err != nil { + return fmt.Errorf("loading pinned map %s: %w", m.path, err) + } + + m.updateMetrics() + registerMap(m.path, m) + + m.m = em + + return nil +} + +func (m *Map) Close() error { + m.lock.Lock() + defer m.lock.Unlock() + + if m.enableSync { + mapControllers.RemoveController(m.controllerName()) + } + + if m.m != nil { + m.m.Close() + m.m = nil + } + + unregisterMap(m.path, m) + + return nil +} + +func (m *Map) NextKey(key, nextKeyOut interface{}) error { + var duration *spanstat.SpanStat + if metrics.BPFSyscallDuration.IsEnabled() { + duration = spanstat.Start() + } + + err := m.m.NextKey(key, nextKeyOut) + + if metrics.BPFSyscallDuration.IsEnabled() { + metrics.BPFSyscallDuration.WithLabelValues(metricOpGetNextKey, metrics.Error2Outcome(err)).Observe(duration.End(err == nil).Total().Seconds()) + } + + return err +} + +type DumpCallback func(key MapKey, value MapValue) + +// DumpWithCallback iterates over the Map and calls the given DumpCallback for +// each map entry. With the current implementation, it is safe for callbacks to +// retain the values received, as they are guaranteed to be new instances. +// +// TODO(tb): This package currently doesn't support dumping per-cpu maps, as +// ReadValueSize is always set to the size of a single value. +func (m *Map) DumpWithCallback(cb DumpCallback) error { + if cb == nil { + return errors.New("empty callback") + } + + if err := m.Open(); err != nil { + return err + } + + m.lock.RLock() + defer m.lock.RUnlock() + + // Don't need deep copies here, only fresh pointers. + mk := m.key.New() + mv := m.value.New() + + i := m.m.Iterate() + for i.Next(mk, mv) { + cb(mk, mv) + + mk = m.key.New() + mv = m.value.New() + } + + return i.Err() +} + +// DumpWithCallbackIfExists is similar to DumpWithCallback, but returns earlier +// if the given map does not exist. 
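+//
+// An illustrative callback that just prints each entry:
+//
+//	err := m.DumpWithCallbackIfExists(func(key MapKey, value MapValue) {
+//		fmt.Printf("%s -> %s\n", key, value)
+//	})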
+func (m *Map) DumpWithCallbackIfExists(cb DumpCallback) error {
+	found, err := m.exist()
+	if err != nil {
+		return err
+	}
+
+	if found {
+		return m.DumpWithCallback(cb)
+	}
+
+	return nil
+}
+
+// DumpReliablyWithCallback is similar to DumpWithCallback, but performs
+// additional tracking of the current and recently seen keys, so that if an
+// element is removed from the underlying kernel map during the dump, the dump
+// can continue from a recently seen key rather than restarting from scratch.
+// In addition, it caps the maximum number of map entry iterations at 4 times
+// the maximum map size. If this limit is reached, ErrMaxLookup is returned.
+//
+// The caller must provide a callback for handling each entry, and a stats
+// object initialized via a call to NewDumpStats().
+func (m *Map) DumpReliablyWithCallback(cb DumpCallback, stats *DumpStats) error {
+	if cb == nil {
+		return errors.New("empty callback")
+	}
+
+	if stats == nil {
+		return errors.New("stats is nil")
+	}
+
+	var (
+		prevKey    = m.key.New()
+		currentKey = m.key.New()
+		nextKey    = m.key.New()
+		value      = m.value.New()
+
+		prevKeyValid = false
+	)
+
+	stats.start()
+	defer stats.finish()
+
+	if err := m.Open(); err != nil {
+		return err
+	}
+
+	// Get the first map key.
+	if err := m.NextKey(nil, currentKey); err != nil {
+		stats.Lookup = 1
+		if errors.Is(err, ebpf.ErrKeyNotExist) {
+			// Empty map, nothing to iterate.
+			stats.Completed = true
+			return nil
+		}
+	}
+
+	// maxLookup is an upper bound limit to prevent backtracking forever
+	// when iterating over the map's elements (the map might be concurrently
+	// updated while being iterated)
+	maxLookup := stats.MaxEntries * 4
+
+	// This loop stops when all elements have been iterated (Map.NextKey() returns
+	// ErrKeyNotExist) OR, in order to avoid hanging if
+	// the map is continuously updated, when maxLookup has been reached
+	for stats.Lookup = 1; stats.Lookup <= maxLookup; stats.Lookup++ {
+		// currentKey was set by the first m.NextKey() above. We know it existed in
+		// the map, but it may have been deleted by a concurrent map operation.
+		//
+		// If currentKey is no longer in the map, nextKey may be the first key in
+		// the map again. Continue with nextKey only if we still find currentKey in
+		// the Lookup() after the call to m.NextKey(), this way we know nextKey is
+		// NOT the first key in the map and iteration hasn't reset.
+		nextKeyErr := m.NextKey(currentKey, nextKey)
+
+		if err := m.m.Lookup(currentKey, value); err != nil {
+			stats.LookupFailed++
+			// Restarting from an invalid key starts the iteration again from the beginning.
+			// If we have a previously found key, try to restart from there instead
+			if prevKeyValid {
+				currentKey = prevKey
+				// Restart from a given previous key only once, otherwise if the prevKey is
+				// concurrently deleted we might loop forever trying to look it up.
+				prevKeyValid = false
+				stats.KeyFallback++
+			} else {
+				// Depending on exactly when currentKey was deleted from the
+				// map, nextKey may be the actual key element after the deleted
+				// one, or the first element in the map.
+				currentKey = nextKey
+				// To avoid having nextKey and currentKey pointing at the same memory
+				// we allocate a new key for nextKey. Without this currentKey and nextKey
+				// would be the same pointer value and would get double iterated on the next
+				// iterations m.NextKey(...) call.
+				nextKey = m.key.New()
+				stats.Interrupted++
+			}
+			continue
+		}
+
+		cb(currentKey, value)
+
+		if nextKeyErr != nil {
+			if errors.Is(nextKeyErr, ebpf.ErrKeyNotExist) {
+				stats.Completed = true
+				return nil // end of map, we're done iterating
+			}
+			return nextKeyErr
+		}
+
+		// Prepare keys to move to the next iteration.
+		prevKey = currentKey
+		currentKey = nextKey
+		nextKey = m.key.New()
+		prevKeyValid = true
+	}
+
+	return ErrMaxLookup
+}
+
+// Dump fills the given hash (of type map[string][]string) with all
+// data stored in the BPF map.
+func (m *Map) Dump(hash map[string][]string) error {
+	callback := func(key MapKey, value MapValue) {
+		// No need to deep copy since we are creating strings.
+		hash[key.String()] = append(hash[key.String()], value.String())
+	}
+
+	if err := m.DumpWithCallback(callback); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DumpIfExists dumps the contents of the map into hash via Dump() if the map
+// file exists
+func (m *Map) DumpIfExists(hash map[string][]string) error {
+	found, err := m.exist()
+	if err != nil {
+		return err
+	}
+
+	if found {
+		return m.Dump(hash)
+	}
+
+	return nil
+}
+
+func (m *Map) Lookup(key MapKey) (MapValue, error) {
+	if err := m.Open(); err != nil {
+		return nil, err
+	}
+
+	m.lock.RLock()
+	defer m.lock.RUnlock()
+
+	var duration *spanstat.SpanStat
+	if metrics.BPFSyscallDuration.IsEnabled() {
+		duration = spanstat.Start()
+	}
+
+	value := m.value.New()
+	err := m.m.Lookup(key, value)
+
+	if metrics.BPFSyscallDuration.IsEnabled() {
+		metrics.BPFSyscallDuration.WithLabelValues(metricOpLookup, metrics.Error2Outcome(err)).Observe(duration.End(err == nil).Total().Seconds())
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return value, nil
+}
+
+func (m *Map) Update(key MapKey, value MapValue) error {
+	var err error
+
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	defer func() {
+		desiredAction := OK
+		if err != nil {
+			desiredAction = Insert
+		}
+		entry := &cacheEntry{
+			Key:           key,
+			Value:         value,
+			DesiredAction: desiredAction,
+			LastError:     err,
+		}
+		m.addToEventsLocked(MapUpdate, *entry)
+
+		if m.cache == nil {
+			return
+		}
+
+		if m.withValueCache {
+			if err != nil {
+				m.scheduleErrorResolver()
+			}
+			m.cache[key.String()] = &cacheEntry{
+				Key:           key,
+				Value:         value,
+				DesiredAction: desiredAction,
+				LastError:     err,
+			}
+			m.updatePressureMetric()
+		} else if err == nil {
+			m.cache[key.String()] = nil
+			m.updatePressureMetric()
+		}
+	}()
+
+	if err = m.open(); err != nil {
+		return err
+	}
+
+	err = m.m.Update(key, value, ebpf.UpdateAny)
+
+	if metrics.BPFMapOps.IsEnabled() {
+		metrics.BPFMapOps.WithLabelValues(m.commonName(), metricOpUpdate, metrics.Error2Outcome(err)).Inc()
+	}
+
+	if err != nil {
+		return fmt.Errorf("update map %s: %w", m.Name(), err)
+	}
+
+	return nil
+}
+
+// deleteMapEvent is run at every delete map event.
+// If the cache is enabled, it will update the cache to reflect the delete.
+// Additionally, if the event buffer is enabled, it adds a new event to the buffer.
+func (m *Map) deleteMapEvent(key MapKey, err error) {
+	m.addToEventsLocked(MapDelete, cacheEntry{
+		Key:           key,
+		DesiredAction: Delete,
+		LastError:     err,
+	})
+	m.deleteCacheEntry(key, err)
+}
+
+func (m *Map) deleteAllMapEvent() {
+	m.addToEventsLocked(MapDeleteAll, cacheEntry{})
+}
+
+// deleteCacheEntry evaluates the specified error. If nil, the map key is
+// removed from the cache to indicate successful deletion. If non-nil, the map
+// key entry in the cache is updated to indicate deletion failure with the
+// specified error.
+//
+// Caller must hold m.lock for writing
+func (m *Map) deleteCacheEntry(key MapKey, err error) {
+	if m.cache == nil {
+		return
+	}
+
+	k := key.String()
+	if err == nil {
+		delete(m.cache, k)
+	} else if !m.withValueCache {
+		return
+	} else {
+		entry, ok := m.cache[k]
+		if !ok {
+			m.cache[k] = &cacheEntry{
+				Key: key,
+			}
+			entry = m.cache[k]
+		}
+
+		entry.DesiredAction = Delete
+		entry.LastError = err
+		m.scheduleErrorResolver()
+	}
+}
+
+// delete deletes the map entry corresponding to the given key. If ignoreMissing
+// is set to true and the entry was not found, the error metric is not
+// incremented for missing entries and a nil error is returned.
+func (m *Map) delete(key MapKey, ignoreMissing bool) (_ bool, err error) {
+	defer func() {
+		m.deleteMapEvent(key, err)
+		if err != nil {
+			m.updatePressureMetric()
+		}
+	}()
+
+	if err = m.open(); err != nil {
+		return false, err
+	}
+
+	var duration *spanstat.SpanStat
+	if metrics.BPFSyscallDuration.IsEnabled() {
+		duration = spanstat.Start()
+	}
+
+	err = m.m.Delete(key)
+
+	if metrics.BPFSyscallDuration.IsEnabled() {
+		metrics.BPFSyscallDuration.WithLabelValues(metricOpDelete, metrics.Error2Outcome(err)).Observe(duration.End(err == nil).Total().Seconds())
+	}
+
+	if errors.Is(err, ebpf.ErrKeyNotExist) && ignoreMissing {
+		// Error and metrics handling is skipped in case ignoreMissing is set and
+		// the map key did not exist. This removes false positives in the delete
+		// metrics and skips the deferred cleanup of nonexistent entries. This
+		// situation occurs at least in the context of cleanup of NAT mappings from
+		// CT GC.
+		return false, nil
+	}
+
+	if metrics.BPFMapOps.IsEnabled() {
+		// err can be nil or any error other than ebpf.ErrKeyNotExist.
+		metrics.BPFMapOps.WithLabelValues(m.commonName(), metricOpDelete, metrics.Error2Outcome(err)).Inc()
+	}
+
+	if err != nil {
+		return false, fmt.Errorf("unable to delete element %s from map %s: %w", key, m.name, err)
+	}
+
+	return true, nil
+}
+
+// SilentDelete deletes the map entry corresponding to the given key.
+// If the map entry is not found, this returns (false, nil).
+func (m *Map) SilentDelete(key MapKey) (deleted bool, err error) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	return m.delete(key, true)
+}
+
+// Delete deletes the map entry corresponding to the given key.
+func (m *Map) Delete(key MapKey) error {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	_, err := m.delete(key, false)
+	return err
+}
+
+// scopedLogger returns a logger scoped for the map. m.lock must be held.
+func (m *Map) scopedLogger() *logrus.Entry {
+	return log.WithFields(logrus.Fields{logfields.Path: m.path, "name": m.name})
+}
+
+// DeleteAll deletes all entries of a map by traversing the map and deleting individual
+// entries. Note that if entries are added while the traversal is in progress,
+// such entries may survive the deletion process.
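+//
+// When the value cache is enabled, entries are first marked with a pending
+// deletion error, so that entries whose deletion fails remain visible in the
+// cache.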
+func (m *Map) DeleteAll() error {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	defer m.updatePressureMetric()
+	scopedLog := m.scopedLogger()
+	scopedLog.Debug("deleting all entries in map")
+
+	if m.withValueCache {
+		// Mark all entries for deletion, upon successful deletion,
+		// entries will be removed or the LastError will be updated
+		for _, entry := range m.cache {
+			entry.DesiredAction = Delete
+			entry.LastError = fmt.Errorf("deletion pending")
+		}
+	}
+
+	if err := m.open(); err != nil {
+		return err
+	}
+
+	mk := m.key.New()
+	mv := make([]byte, m.ValueSize())
+
+	defer m.deleteAllMapEvent()
+
+	i := m.m.Iterate()
+	for i.Next(mk, &mv) {
+		err := m.m.Delete(mk)
+
+		m.deleteCacheEntry(mk, err)
+
+		if err != nil {
+			return err
+		}
+	}
+
+	err := i.Err()
+	if err != nil {
+		scopedLog.WithError(err).Warningf("Unable to correlate iteration key %v with cache entry. Inconsistent cache.", mk)
+	}
+
+	return err
+}
+
+// GetModel returns a BPF map in the representation served via the API
+func (m *Map) GetModel() *models.BPFMap {
+	m.lock.RLock()
+	defer m.lock.RUnlock()
+
+	mapModel := &models.BPFMap{
+		Path: m.path,
+	}
+
+	if m.withValueCache {
+		mapModel.Cache = make([]*models.BPFMapEntry, len(m.cache))
+		i := 0
+		for k, entry := range m.cache {
+			model := &models.BPFMapEntry{
+				Key:           k,
+				DesiredAction: entry.DesiredAction.String(),
+			}
+
+			if entry.LastError != nil {
+				model.LastError = entry.LastError.Error()
+			}
+
+			if entry.Value != nil {
+				model.Value = entry.Value.String()
+			}
+			mapModel.Cache[i] = model
+			i++
+		}
+	}
+
+	return mapModel
+}
+
+func (m *Map) addToEventsLocked(action Action, entry cacheEntry) {
+	if !m.eventsBufferEnabled {
+		return
+	}
+	m.events.add(&Event{
+		action:     action,
+		Timestamp:  time.Now(),
+		cacheEntry: entry,
+	})
+}
+
+// resolveErrors is scheduled by scheduleErrorResolver() and runs periodically.
+// It resolves up to maxSyncErrors discrepancies between cache and BPF map in
+// the kernel.
+func (m *Map) resolveErrors(ctx context.Context) error {
+	started := time.Now()
+
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	if m.cache == nil {
+		return nil
+	}
+
+	if !m.outstandingErrors {
+		return nil
+	}
+
+	outstanding := 0
+	for _, e := range m.cache {
+		switch e.DesiredAction {
+		case Insert, Delete:
+			outstanding++
+		}
+	}
+
+	// Errors appear to have already been resolved. This can happen if a subsequent
+	// Update/Delete operation acting on the same key succeeded.
+	if outstanding == 0 {
+		m.outstandingErrors = false
+		return nil
+	}
+
+	if err := m.open(); err != nil {
+		return err
+	}
+
+	scopedLogger := m.scopedLogger()
+	scopedLogger.WithField("remaining", outstanding).
+		Debug("Starting periodic BPF map error resolver")
+
+	resolved := 0
+	scanned := 0
+	nerr := 0
+	for k, e := range m.cache {
+		scanned++
+
+		switch e.DesiredAction {
+		case OK:
+		case Insert:
+			// Call into ebpf-go's Map.Update() directly, don't go through the cache.
+			err := m.m.Update(e.Key, e.Value, ebpf.UpdateAny)
+			if metrics.BPFMapOps.IsEnabled() {
+				metrics.BPFMapOps.WithLabelValues(m.commonName(), metricOpUpdate, metrics.Error2Outcome(err)).Inc()
+			}
+			if err == nil {
+				e.DesiredAction = OK
+				e.LastError = nil
+				resolved++
+				outstanding--
+			} else {
+				e.LastError = err
+				nerr++
+			}
+			m.cache[k] = e
+			m.addToEventsLocked(MapUpdate, *e)
+		case Delete:
+			// Holding lock, issue direct delete on map.
+ err := m.m.Delete(e.Key) + if metrics.BPFMapOps.IsEnabled() { + metrics.BPFMapOps.WithLabelValues(m.commonName(), metricOpDelete, metrics.Error2Outcome(err)).Inc() + } + if err == nil || errors.Is(err, ebpf.ErrKeyNotExist) { + delete(m.cache, k) + resolved++ + outstanding-- + } else { + e.LastError = err + nerr++ + m.cache[k] = e + } + + m.addToEventsLocked(MapDelete, *e) + } + + // bail out if maximum errors are reached to relax the map lock + if nerr > maxSyncErrors { + break + } + } + + m.updatePressureMetric() + + scopedLogger.WithFields(logrus.Fields{ + "remaining": outstanding, + "resolved": resolved, + "scanned": scanned, + "duration": time.Since(started), + }).Debug("BPF map error resolver completed") + + m.outstandingErrors = outstanding > 0 + if m.outstandingErrors { + return fmt.Errorf("%d map sync errors", outstanding) + } + + return nil +} + +// CheckAndUpgrade checks the received map's properties (for the map currently +// loaded into the kernel) against the desired properties, and if they do not +// match, deletes the map. +// +// Returns true if the map was upgraded. +func (m *Map) CheckAndUpgrade(desired *Map) bool { + flags := desired.Flags() | GetPreAllocateMapFlags(desired.Type()) + + return objCheck( + m.m, + m.path, + desired.Type(), + desired.KeySize(), + desired.ValueSize(), + desired.MaxEntries(), + flags, + ) +} + +func (m *Map) exist() (bool, error) { + path, err := m.Path() + if err != nil { + return false, err + } + + if _, err := os.Stat(path); err == nil { + return true, nil + } + + return false, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/map_register_linux.go b/vendor/github.com/cilium/cilium/pkg/bpf/map_register_linux.go new file mode 100644 index 0000000000..d0b8e3cafa --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/bpf/map_register_linux.go @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build linux + +package bpf + +import ( + "path" + + "github.com/cilium/cilium/api/v1/models" + "github.com/cilium/cilium/pkg/lock" +) + +var ( + mutex lock.RWMutex + mapRegister = map[string]*Map{} +) + +func registerMap(path string, m *Map) { + mutex.Lock() + mapRegister[path] = m + mutex.Unlock() + + log.WithField("path", path).Debug("Registered BPF map") +} + +func unregisterMap(path string, m *Map) { + mutex.Lock() + delete(mapRegister, path) + mutex.Unlock() + + log.WithField("path", path).Debug("Unregistered BPF map") +} + +// GetMap returns the registered map with the given name or absolute path +func GetMap(name string) *Map { + mutex.RLock() + defer mutex.RUnlock() + + if !path.IsAbs(name) { + name = MapPath(name) + } + + return mapRegister[name] +} + +// GetOpenMaps returns a slice of all open BPF maps. This is identical to +// calling GetMap() on all open maps. 
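+//
+// Example (illustrative):
+//
+//	for _, mdl := range GetOpenMaps() {
+//		fmt.Println(mdl.Path)
+//	}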
+func GetOpenMaps() []*models.BPFMap {
+	// create a copy of mapRegister so we can unlock the mutex again as
+	// locking Map.lock inside of the mutex is not permitted
+	mutex.RLock()
+	maps := make([]*Map, 0, len(mapRegister))
+	for _, m := range mapRegister {
+		maps = append(maps, m)
+	}
+	mutex.RUnlock()
+
+	mapList := make([]*models.BPFMap, len(maps))
+
+	i := 0
+	for _, m := range maps {
+		mapList[i] = m.GetModel()
+		i++
+	}
+
+	return mapList
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/metrics.go b/vendor/github.com/cilium/cilium/pkg/bpf/metrics.go
new file mode 100644
index 0000000000..da5e600af8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/bpf/metrics.go
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package bpf
+
+const (
+	metricOpCreate     = "create"
+	metricOpUpdate     = "update"
+	metricOpLookup     = "lookup"
+	metricOpDelete     = "delete"
+	metricOpGetNextKey = "getNextKey"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/bpf/stats_linux.go b/vendor/github.com/cilium/cilium/pkg/bpf/stats_linux.go
new file mode 100644
index 0000000000..e6749b360c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/bpf/stats_linux.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build linux
+
+package bpf
+
+import (
+	"github.com/cilium/cilium/pkg/time"
+)
+
+// DumpStats tracks statistics over the dump of a map.
+type DumpStats struct {
+	// Started is the timestamp when the gc run was started.
+	Started time.Time
+
+	// Finished is the timestamp when the gc run completed.
+	Finished time.Time
+
+	// Lookup is the number of key lookups performed.
+	Lookup uint32
+
+	// LookupFailed is the number of key lookups that failed.
+	LookupFailed uint32
+
+	// PrevKeyUnavailable is the number of times the previous key was not
+	// available.
+	PrevKeyUnavailable uint32
+
+	// KeyFallback is the number of times the current key became invalid
+	// while traversing and we had to fall back to the previous key.
+	KeyFallback uint32
+
+	// MaxEntries is the maximum number of entries in the gc table.
+	MaxEntries uint32
+
+	// Interrupted is the number of times the gc run was interrupted and
+	// had to start from scratch.
+	Interrupted uint32
+
+	// Completed is true when the gc run has been completed.
+	Completed bool
+}
+
+// NewDumpStats returns a new stats structure for collecting dump statistics.
+func NewDumpStats(m *Map) *DumpStats {
+	return &DumpStats{
+		MaxEntries: m.MaxEntries(),
+	}
+}
+
+// start starts the dump.
+func (d *DumpStats) start() {
+	d.Started = time.Now()
+}
+
+// finish finishes the dump.
+func (d *DumpStats) finish() {
+	d.Finished = time.Now()
+}
+
+// Duration returns the duration of the dump.
+func (d *DumpStats) Duration() time.Duration {
+	return d.Finished.Sub(d.Started)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder.go b/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder.go
new file mode 100644
index 0000000000..caa855d214
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder.go
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package byteorder
+
+import (
+	"net"
+	"net/netip"
+)
+
+// NetIPv4ToHost32 converts a net.IP to a uint32 in host byte order. ip
+// must be an IPv4 address, otherwise the function will panic.
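+//
+// Example (illustrative):
+//
+//	v := NetIPv4ToHost32(net.ParseIP("10.0.0.1"))
+//	// v == 0x0100000a on little-endian hosts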
+func NetIPv4ToHost32(ip net.IP) uint32 { + ipv4 := ip.To4() + _ = ipv4[3] // Assert length of ipv4. + return Native.Uint32(ipv4) +} + +func NetIPAddrToHost32(ip netip.Addr) uint32 { + ipv4 := ip.As4() + return Native.Uint32(ipv4[:]) +} diff --git a/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_bigendian.go b/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_bigendian.go new file mode 100644 index 0000000000..7b0873f82d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_bigendian.go @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build armbe || arm64be || mips || mips64 || ppc64 + +package byteorder + +import "encoding/binary" + +var Native binary.ByteOrder = binary.BigEndian + +func HostToNetwork16(u uint16) uint16 { return u } +func HostToNetwork32(u uint32) uint32 { return u } +func HostToNetwork64(u uint64) uint64 { return u } +func NetworkToHost16(u uint16) uint16 { return u } +func NetworkToHost32(u uint32) uint32 { return u } +func NetworkToHost64(u uint64) uint64 { return u } diff --git a/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_littleendian.go b/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_littleendian.go new file mode 100644 index 0000000000..0fffee1b24 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_littleendian.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build 386 || amd64 || arm || arm64 || mips64le || ppc64le || riscv64 || wasm + +package byteorder + +import ( + "encoding/binary" + "math/bits" +) + +var Native binary.ByteOrder = binary.LittleEndian + +func HostToNetwork16(u uint16) uint16 { return bits.ReverseBytes16(u) } +func HostToNetwork32(u uint32) uint32 { return bits.ReverseBytes32(u) } +func HostToNetwork64(u uint64) uint64 { return bits.ReverseBytes64(u) } +func NetworkToHost16(u uint16) uint16 { return bits.ReverseBytes16(u) } +func NetworkToHost32(u uint32) uint32 { return bits.ReverseBytes32(u) } +func NetworkToHost64(u uint64) uint64 { return bits.ReverseBytes64(u) } diff --git a/vendor/github.com/cilium/cilium/pkg/byteorder/doc.go b/vendor/github.com/cilium/cilium/pkg/byteorder/doc.go new file mode 100644 index 0000000000..1c2497c758 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/byteorder/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package byteorder provides functions to convert from and to network byte order. +package byteorder diff --git a/vendor/github.com/cilium/cilium/pkg/common/const.go b/vendor/github.com/cilium/cilium/pkg/common/const.go new file mode 100644 index 0000000000..365d9ae288 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/common/const.go @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package common + +const ( + // Miscellaneous dedicated constants + + // CHeaderFileName is the name of the C header file for BPF programs for a + // particular endpoint. + CHeaderFileName = "ep_config.h" + + // PossibleCPUSysfsPath is used to retrieve the number of CPUs for per-CPU maps. 
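+	// The file contains comma-separated CPU ranges, e.g. "0-7" or "0,2-3".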
+	PossibleCPUSysfsPath = "/sys/devices/system/cpu/possible"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/common/utils.go b/vendor/github.com/cilium/cilium/pkg/common/utils.go
new file mode 100644
index 0000000000..b8a5a5d400
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/common/utils.go
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package common
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+
+	"github.com/cilium/cilium/pkg/safeio"
+)
+
+// C2GoArray transforms a hexadecimal string representation into a byte slice.
+// Example:
+// str := "0x12, 0xff, 0x0, 0x1"
+// fmt.Print(C2GoArray(str)) // `{0x12, 0xFF, 0x0, 0x01}`
+func C2GoArray(str string) []byte {
+	ret := []byte{}
+
+	if str == "" {
+		return ret
+	}
+
+	hexStr := strings.Split(str, ", ")
+	for _, hexDigit := range hexStr {
+		strDigit := strings.TrimPrefix(hexDigit, "0x")
+		digitUint64, err := strconv.ParseUint(strDigit, 16, 8)
+		if err != nil {
+			return nil
+		}
+		ret = append(ret, byte(digitUint64))
+	}
+	return ret
+}
+
+// GoArray2C transforms a byte slice into its hexadecimal string representation.
+// Example:
+// array := []byte{0x12, 0xFF, 0x0, 0x01}
+// fmt.Print(GoArray2C(array)) // "{ 0x12, 0xff, 0x0, 0x1 }"
+func GoArray2C(array []byte) string {
+	return goArray2C(array, true)
+}
+
+// GoArray2CNoSpaces does the same as GoArray2C, but no spaces are used in
+// the final output.
+// Example:
+// array := []byte{0x12, 0xFF, 0x0, 0x01}
+// fmt.Print(GoArray2CNoSpaces(array)) // "{0x12,0xff,0x0,0x1}"
+func GoArray2CNoSpaces(array []byte) string {
+	return goArray2C(array, false)
+}
+
+func goArray2C(array []byte, space bool) string {
+	ret := ""
+	format := ",%#x"
+	if space {
+		format = ", %#x"
+	}
+
+	for i, e := range array {
+		if i == 0 {
+			ret = ret + fmt.Sprintf("%#x", e)
+		} else {
+			ret = ret + fmt.Sprintf(format, e)
+		}
+	}
+	return ret
+}
+
+// RequireRootPrivilege checks if the user running cmd is root. If not, it exits the program
+func RequireRootPrivilege(cmd string) {
+	if os.Getuid() != 0 {
+		fmt.Fprintf(os.Stderr, "Please run %q command(s) with root privileges.\n", cmd)
+		os.Exit(1)
+	}
+}
+
+// MapStringStructToSlice returns a slice with all keys of the given
+// map[string]struct{}
+func MapStringStructToSlice(m map[string]struct{}) []string {
+	s := make([]string, 0, len(m))
+	for k := range m {
+		s = append(s, k)
+	}
+	return s
+}
+
+// GetNumPossibleCPUs returns the total number of possible CPUs, i.e. CPUs that
+// have been allocated resources and can be brought online if they are present.
+// The number is retrieved by parsing /sys/devices/system/cpu/possible.
+//
+// See https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/linux/cpumask.h?h=v4.19#n50
+// for more details.
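+//
+// Returns 0 (and logs an error) if the sysfs file cannot be opened or parsed.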
+func GetNumPossibleCPUs(log logrus.FieldLogger) int {
+	f, err := os.Open(PossibleCPUSysfsPath)
+	if err != nil {
+		log.WithError(err).Errorf("unable to open %q", PossibleCPUSysfsPath)
+		return 0
+	}
+	defer f.Close()
+
+	return getNumPossibleCPUsFromReader(log, f)
+}
+
+func getNumPossibleCPUsFromReader(log logrus.FieldLogger, r io.Reader) int {
+	out, err := safeio.ReadAllLimit(r, safeio.KB)
+	if err != nil {
+		log.WithError(err).Errorf("unable to read %q to get CPU count", PossibleCPUSysfsPath)
+		return 0
+	}
+
+	var start, end int
+	count := 0
+	for _, s := range strings.Split(string(out), ",") {
+		// Go's scanf will return an error if a format cannot be fully matched.
+		// So, just ignore it, as a partial match (e.g. when there is only one
+		// CPU) is expected.
+		n, err := fmt.Sscanf(s, "%d-%d", &start, &end)
+
+		switch n {
+		case 0:
+			log.WithError(err).Errorf("failed to scan %q to retrieve number of possible CPUs!", s)
+			return 0
+		case 1:
+			count++
+		default:
+			count += (end - start + 1)
+		}
+	}
+
+	return count
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/container/ring_buffer.go b/vendor/github.com/cilium/cilium/pkg/container/ring_buffer.go
new file mode 100644
index 0000000000..985b688247
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/container/ring_buffer.go
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package container
+
+import (
+	"sort"
+)
+
+// RingBuffer is a generic ring buffer implementation that contains
+// sequential data (e.g. time-ordered data).
+// RingBuffer is implemented using slices. From testing, this should
+// be faster than linked-list implementations, and also allows for efficient
+// indexing of ordered data.
+type RingBuffer struct {
+	buffer  []interface{}
+	next    int // index of ring buffer head.
+	maxSize int
+}
+
+// NewRingBuffer constructs a new ring buffer for a given buffer size.
+func NewRingBuffer(bufferSize int) *RingBuffer {
+	return &RingBuffer{
+		buffer:  make([]interface{}, 0, bufferSize),
+		maxSize: bufferSize,
+	}
+}
+
+func (eb *RingBuffer) isFull() bool {
+	return len(eb.buffer) >= eb.maxSize
+}
+
+func (eb *RingBuffer) incr() {
+	eb.next = (eb.next + 1) % eb.maxSize
+}
+
+// Add adds an element to the buffer.
+func (eb *RingBuffer) Add(e interface{}) {
+	if eb.maxSize == 0 {
+		return
+	}
+	if eb.isFull() {
+		eb.buffer[eb.next] = e
+		eb.incr()
+		return
+	}
+	eb.incr()
+	eb.buffer = append(eb.buffer, e)
+}
+
+func (eb *RingBuffer) dumpWithCallback(callback func(v interface{})) {
+	for i := 0; i < len(eb.buffer); i++ {
+		callback(eb.at(i))
+	}
+}
+
+func (eb *RingBuffer) at(i int) interface{} {
+	return eb.buffer[eb.mapIndex(i)]
+}
+
+// firstValidIndex returns the first **absolute** index in the buffer that satisfies
+// isValid.
+// note: this value needs to be mapped before indexing the buffer.
+func (eb *RingBuffer) firstValidIndex(isValid func(interface{}) bool) int {
+	return sort.Search(len(eb.buffer), func(i int) bool {
+		return isValid(eb.at(i))
+	})
+}
+
+// IterateValid calls the callback on each element of the buffer, starting with
+// the first element in the buffer that satisfies "isValid".
+func (eb *RingBuffer) IterateValid(isValid func(interface{}) bool, callback func(interface{})) {
+	startIndex := eb.firstValidIndex(isValid)
+	l := len(eb.buffer) - startIndex
+	for i := 0; i < l; i++ {
+		index := eb.mapIndex(startIndex + i)
+		callback(eb.buffer[index])
+	}
+}
+
+// mapIndex maps a logical index in [0:len(buffer)) to the actual index in buffer.
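+// For example, with next == 2 and a buffer of length 4, logical index 0 maps
+// to physical index 2 and logical index 3 maps to physical index 1.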
+func (eb *RingBuffer) mapIndex(indexOffset int) int {
+	ret := (eb.next + indexOffset) % len(eb.buffer)
+	return ret
+}
+
+// Compact clears out invalidated elements in the buffer.
+// This may require copying the entire buffer.
+// It is assumed that if buffer[i] is invalid then every entry [0...i-1] is also not valid.
+func (eb *RingBuffer) Compact(isValid func(interface{}) bool) {
+	if len(eb.buffer) == 0 {
+		return
+	}
+	startIndex := eb.firstValidIndex(isValid)
+	// In this case, we compact the entire buffer.
+	if startIndex >= len(eb.buffer) {
+		eb.buffer = []interface{}{}
+		eb.next = 0
+		return
+	}
+
+	mappedStart := eb.mapIndex(startIndex) // mappedStart is the new index 0 of our buffer.
+	// new length will be how long the current buffer is, minus the absolute starting index.
+	newBufferLength := len(eb.buffer) - startIndex
+	// case where the head index is to the left of the tail index,
+	// e.g. [... head, tail, ...]
+	// mappedStart + newBufferLength is the upper bound of the new buffer list
+	// if we don't have to worry about mapping.
+	//
+	// e.g. [mappedStart:mappedStart+newBufferLength] <- this is our new buffer.
+	//
+	// If this value is less than or equal to the length then we don't need
+	// to worry about any part of the list wrapping around.
+	if mappedStart+newBufferLength > len(eb.buffer) {
+		// now we can find the actual end index, by offsetting the startIndex
+		// by the length and mapping it.
+		// [... startIndex+newBufferLen ... startIndex ...]
+		end := eb.mapIndex(startIndex + newBufferLength)
+		tmp := make([]interface{}, len(eb.buffer[:end]))
+		copy(tmp, eb.buffer[:end])
+
+		eb.buffer = eb.buffer[mappedStart:]
+		eb.buffer = append(eb.buffer, tmp...)
+
+		// at this point the buffer is such that the 0th element
+		// maps to the 0th index in the buffer array.
+		eb.next = len(eb.buffer)
+		if eb.isFull() {
+			eb.next = eb.next % eb.maxSize
+		}
+		return
+	}
+	// otherwise, the head is to the right of the tail.
+	begin := mappedStart
+	end := mappedStart + newBufferLength
+	eb.buffer = eb.buffer[begin:end]
+	eb.next = len(eb.buffer)
+	if eb.isFull() {
+		eb.next = eb.next % eb.maxSize
+	}
+}
+
+// Iterate is a convenience function over IterateValid that iterates
+// over all elements in the buffer.
+func (eb *RingBuffer) Iterate(callback func(interface{})) {
+	eb.IterateValid(func(e interface{}) bool { return true }, callback)
+}
+
+// Size returns the size of the buffer.
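+// This is the number of elements currently stored, not the configured
+// maximum capacity.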
+func (eb *RingBuffer) Size() int {
+	return len(eb.buffer)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/controller/cell.go b/vendor/github.com/cilium/cilium/pkg/controller/cell.go
new file mode 100644
index 0000000000..53119459a0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/controller/cell.go
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package controller
+
+import (
+	"github.com/spf13/pflag"
+
+	"github.com/cilium/cilium/pkg/hive/cell"
+	"github.com/cilium/cilium/pkg/metrics"
+	"github.com/cilium/cilium/pkg/metrics/metric"
+)
+
+const (
+	controllerGroupMetrics = "controller-group-metrics"
+
+	// labelControllerGroupName is the label used
+	// to identify controller-specific metrics
+	labelControllerGroupName = "group_name"
+)
+
+var (
+	// groupMetricEnabled is populated with the set of ControllerGroups for which metrics are enabled
+	groupMetricEnabled = map[string]bool{}
+
+	// GroupRuns is a Prometheus-compatible metric for Controller
+	// runs, labeled by completion status and Group name
+	GroupRuns = metrics.NoOpCounterVec
+)
+
+var Cell = cell.Module(
+	"controller",
+	"Controllers and Controller Lifecycle management",
+	cell.Config(Config{}),
+	cell.Metric(NewMetrics),
+	cell.Invoke(Init),
+)
+
+type Metrics struct {
+	ControllerGroupRuns metric.Vec[metric.Counter]
+}
+
+func NewMetrics() Metrics {
+	return Metrics{
+		ControllerGroupRuns: metric.NewCounterVec(metric.CounterOpts{
+			ConfigName: metrics.Namespace + "_controllers_group_runs_total",
+			Namespace:  metrics.Namespace,
+			Name:       "controllers_group_runs_total",
+			Help:       "Number of times that a controller group was run, labeled by completion status and controller group name",
+		}, []string{labelControllerGroupName, metrics.LabelStatus}),
+	}
+}
+
+type Config struct {
+	// ControllerGroupMetrics is an option which specifies the set of ControllerGroup names
+	// for which metrics will be enabled. The special values 'all' and 'none' are supported.
+	ControllerGroupMetrics []string
+}
+
+func (cfg Config) Flags(flags *pflag.FlagSet) {
+	flags.StringSlice(controllerGroupMetrics, cfg.ControllerGroupMetrics,
+		"List of controller group names for which to enable metrics. "+
+			"Accepts 'all' and 'none'. "+
+			"The set of controller group names available is not guaranteed to be stable between Cilium versions.")
+}
+
+func Init(cfg Config, m Metrics) {
+	// Initialize package-scoped references to Cell configuration
+	for _, name := range cfg.ControllerGroupMetrics {
+		groupMetricEnabled[name] = true
+	}
+
+	GroupRuns = m.ControllerGroupRuns
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/controller/controller.go b/vendor/github.com/cilium/cilium/pkg/controller/controller.go
new file mode 100644
index 0000000000..f35bded20d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/controller/controller.go
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package controller
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+
+	"github.com/sirupsen/logrus"
+
+	"github.com/cilium/cilium/pkg/hive/cell"
+	"github.com/cilium/cilium/pkg/inctimer"
+	"github.com/cilium/cilium/pkg/lock"
+	"github.com/cilium/cilium/pkg/metrics"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+const (
+	success = "success"
+	failure = "failure"
+
+	// special Group "names" for metrics config
+	allControllerMetricsEnabled = "all"
+	noControllerMetricsEnabled  = "none"
+)
+
+// ControllerFunc is a function that the controller runs. This type is used for
+// DoFunc and StopFunc.
+type ControllerFunc func(ctx context.Context) error
+
+// ExitReason is a returnable type from DoFunc that causes the
+// controller to exit. This reason is recorded in the controller's status. The
+// controller is not removed from any manager.
+// Construct one with NewExitReason("a reason")
+type ExitReason struct {
+	// This is constructed in this odd way because the type assertion in
+	// runController didn't work otherwise.
+	error
+}
+
+// NewExitReason returns a new ExitReason
+func NewExitReason(reason string) ExitReason {
+	return ExitReason{errors.New(reason)}
+}
+
+// Group contains metadata about a group of controllers
+type Group struct {
+	// Name of the controller group.
+	//
+	// This name MUST NOT be dynamically generated based on
+	// resource identifier in order to limit metrics cardinality.
+	Name string
+}
+
+func NewGroup(name string) Group {
+	return Group{Name: name}
+}
+
+// ControllerParams contains all parameters of a controller
+type ControllerParams struct {
+	// Group is used for aggregate metrics collection.
+	// The Group.Name must NOT be dynamically generated from a
+	// resource identifier in order to limit metrics cardinality.
+	Group Group
+
+	HealthReporter cell.HealthReporter
+
+	// DoFunc is the function that will be run until it succeeds and/or
+	// using the interval RunInterval if not 0.
+	// An unset DoFunc is an error and will be logged as one.
+	DoFunc ControllerFunc
+
+	// CancelDoFuncOnUpdate when set to true cancels the controller context
+	// (the DoFunc) to allow quick termination of the controller
+	CancelDoFuncOnUpdate bool
+
+	// StopFunc is called when the controller stops. It is intended to run any
+	// clean-up tasks for the controller (e.g. deallocate/release resources).
+	// It is guaranteed that DoFunc is called at least once before StopFunc is
+	// called.
+	// An unset StopFunc is not an error (and will be a no-op).
+	// Note: Since this occurs on controller exit, error counts and tracking may
+	// not be checked after StopFunc is run.
+	StopFunc ControllerFunc
+
+	// RunInterval, if set to a non-zero value, will cause DoFunc to be run at
+	// the specified interval. The interval starts from when the DoFunc last
+	// returned.
+	RunInterval time.Duration
+
+	// MaxRetryInterval, if set to a non-zero value, will cap the error retry
+	// interval at the specified interval.
+	MaxRetryInterval time.Duration
+
+	// ErrorRetryBaseDuration is the initial time to wait to run DoFunc
+	// again on return of an error. On each consecutive error, this value
+	// is multiplied by the number of consecutive errors to provide a
+	// linear back off. The default is 1s.
+	ErrorRetryBaseDuration time.Duration
+
+	// NoErrorRetry, when set to true, disables retries on errors
+	NoErrorRetry bool
+
+	Context context.Context
+}
+
+// undefinedDoFunc is used when no DoFunc is set. controller.DoFunc is set to this
+// when the controller is incorrectly initialised.
+func undefinedDoFunc(name string) error {
+	return fmt.Errorf("controller %s DoFunc is nil", name)
+}
+
+// NoopFunc is a no-op placeholder for DoFunc & StopFunc.
+// It is automatically used when StopFunc is undefined, and can be used as a
+// DoFunc stub when the controller should only run StopFunc.
+func NoopFunc(ctx context.Context) error {
+	return nil
+}
+
+// isGroupMetricEnabled returns true if metrics are enabled for the Group
+//
+// The controller metrics config option is used to determine
+// if "all", "none" (takes precedence over "all"), or the
+// given set of Group names should be enabled.
+//
+// If no controller metrics config option was provided,
+// only then is the DefaultMetricsEnabled field used.
+func isGroupMetricEnabled(g Group) bool {
+	var metricsEnabled = groupMetricEnabled
+	if metricsEnabled == nil {
+		// There is currently no guarantee that a caller of this function
+		// has initialized the configuration map using the hive cell.
+		return false
+	}
+
+	if metricsEnabled[noControllerMetricsEnabled] {
+		// "none" takes precedence over "all"
+		return false
+	} else if metricsEnabled[allControllerMetricsEnabled] {
+		return true
+	} else {
+		return metricsEnabled[g.Name]
+	}
+}
+
+// Controller is a simple pattern that allows performing the following
+// tasks:
+//   - Run an operation in the background and retry until it succeeds
+//   - Perform a regular sync operation in the background
+//
+// A controller has configurable retry intervals and will collect statistics
+// on the number of successful runs, the number of failures, the last error message,
+// and the last error timestamp.
+//
+// Controllers have a name and are tied to a Manager. The manager is typically
+// bound to higher level objects such as an endpoint. These higher level objects
+// can then run multiple controllers to perform async tasks such as:
+//   - Annotating k8s resources with values
+//   - Synchronizing an object with the kvstore
+//   - Any other async operation that may fail and require retries
+//
+// Embedding the Manager into higher level resources allows binding controllers
+// to the lifetime of that object. Controllers also have a UUID to allow
+// correlating all log messages of a controller instance.
+//
+// Guidelines for writing controllers:
+//   - Make sure that the task the controller performs is done in an atomic
+//     fashion, e.g. if a controller modifies a resource in multiple steps, an
+//     intermediate manipulation operation failing should not leave behind
+//     an inconsistent state. This can typically be achieved by locking the
+//     resource and rolling back or by using transactions.
+//   - Controllers typically act on behalf of a higher level object such as an
+//     endpoint. The controller must ensure that the higher level object is
+//     properly locked when accessing any fields.
+//   - Controllers run asynchronously in the background; it is the responsibility
+//     of the controller to be aware of the lifecycle of the owning higher level
+//     object. This is typically achieved by removing all controllers when the
+//     owner dies. It is the responsibility of the owner to either lock the owner
+//     in a way that will delay destruction throughout the controller run or to
+//     check for the destruction throughout the run.
+type controller struct {
+	// Constant after creation, safe to access without locking
+	group  Group
+	name   string
+	uuid   string
+	logger *logrus.Entry
+
+	// Channels written to and/or closed by the manager
+	stop    chan struct{}
+	update  chan ControllerParams
+	trigger chan struct{}
+
+	// terminated is closed by the controller goroutine when it terminates
+	terminated chan struct{}
+
+	// Manipulated by the controller, read by the Manager, requires locking
+	mutex             lock.RWMutex
+	successCount      int
+	lastSuccessStamp  time.Time
+	failureCount      int
+	consecutiveErrors int
+	lastError         error
+	lastErrorStamp    time.Time
+	lastDuration      time.Duration
+}
+
+// GetSuccessCount returns the number of successful controller runs
+func (c *controller) GetSuccessCount() int {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.successCount
+}
+
+// GetFailureCount returns the number of failed controller runs
+func (c *controller) GetFailureCount() int {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.failureCount
+}
+
+// GetLastError returns the last error returned
+func (c *controller) GetLastError() error {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.lastError
+}
+
+// GetLastErrorTimestamp returns the timestamp of the last error
+func (c *controller) GetLastErrorTimestamp() time.Time {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.lastErrorStamp
+}
+
+func (c *controller) runController(params ControllerParams) {
+	errorRetries := 1
+
+	runTimer, timerDone := inctimer.New()
+	defer timerDone()
+
+	for {
+		var err error
+
+		interval := params.RunInterval
+
+		start := time.Now()
+		err = params.DoFunc(params.Context)
+		duration := time.Since(start)
+
+		c.mutex.Lock()
+		c.lastDuration = duration
+		c.getLogger().Debug("Controller func execution time: ", c.lastDuration)
+
+		if err != nil {
+			if params.Context.Err() != nil {
+				// The controller's context was canceled. Let's wait for the
+				// next controller update (or stop).
+				err = NewExitReason("controller context canceled")
+			}
+
+			switch err := err.(type) {
+			case ExitReason:
+				// This is actually not an error case, but it causes an exit
+				c.recordSuccess(params.HealthReporter)
+				c.lastError = err // This will be shown in the controller status
+
+				// Don't exit the goroutine, since that only happens when the
+				// controller is explicitly stopped. Instead, just wait for
+				// the next update.
+				c.getLogger().Debug("Controller run succeeded; waiting for next controller update or stop")
+				interval = time.Duration(math.MaxInt64)
+
+			default:
+				c.getLogger().WithField(fieldConsecutiveErrors, errorRetries).
+					WithError(err).Debug("Controller run failed")
+				c.recordError(err, params.HealthReporter)
+
+				if !params.NoErrorRetry {
+					if params.ErrorRetryBaseDuration != time.Duration(0) {
+						interval = time.Duration(errorRetries) * params.ErrorRetryBaseDuration
+					} else {
+						interval = time.Duration(errorRetries) * time.Second
+					}
+
+					if params.MaxRetryInterval > 0 && interval > params.MaxRetryInterval {
+						c.getLogger().WithFields(logrus.Fields{
+							"calculatedInterval": interval,
+							"maxAllowedInterval": params.MaxRetryInterval,
+						}).Debug("Cap retry interval to max allowed value")
+						interval = params.MaxRetryInterval
+					}
+
+					errorRetries++
+				}
+			}
+		} else {
+			c.recordSuccess(params.HealthReporter)
+
+			// reset error retries after successful attempt
+			errorRetries = 1
+
+			// If no run interval is specified, no further updates
+			// are required.
+			if interval == time.Duration(0) {
+				// Don't exit the goroutine, since that only happens when the
+				// controller is explicitly stopped. Instead, just wait for
+				// the next update.
+				c.getLogger().Debug("Controller run succeeded; waiting for next controller update or stop")
+				interval = time.Duration(math.MaxInt64)
+			}
+		}
+
+		c.mutex.Unlock()
+
+		select {
+		case <-c.stop:
+			goto shutdown
+
+		case params = <-c.update:
+			// update channel is never closed
+		case <-runTimer.After(interval):
+			// timer channel is not yet closed
+		case <-c.trigger:
+			// trigger channel is never closed
+		}
+
+		// If we receive a signal on multiple channels, Go will pick one randomly.
+		// This select will make sure we don't execute the controller
+		// while we are shutting down.
+		select {
+		case <-c.stop:
+			goto shutdown
+		default:
+		}
+	}
+
+shutdown:
+	c.getLogger().Debug("Shutting down controller")
+
+	if err := params.StopFunc(context.TODO()); err != nil {
+		c.mutex.Lock()
+		c.recordError(err, params.HealthReporter)
+		c.mutex.Unlock()
+		c.getLogger().WithField(fieldConsecutiveErrors, errorRetries).
+			WithError(err).Warn("Error on Controller stop")
+	}
+
+	close(c.terminated)
+}
+
+// getLogger returns a logrus entry with the controller name and UUID fields.
+func (c *controller) getLogger() *logrus.Entry {
+	if c.logger == nil {
+		c.logger = log.WithFields(logrus.Fields{
+			fieldControllerName: c.name,
+			fieldUUID:           c.uuid,
+		})
+	}
+
+	return c.logger
+}
+
+// recordError updates all statistic collection variables on error.
+// c.mutex must be held.
+func (c *controller) recordError(err error, hr cell.HealthReporter) {
+	if hr != nil {
+		hr.Degraded(c.name, err)
+	}
+	c.lastError = err
+	c.lastErrorStamp = time.Now()
+	c.failureCount++
+	c.consecutiveErrors++
+
+	metrics.ControllerRuns.WithLabelValues(failure).Inc()
+	if isGroupMetricEnabled(c.group) {
+		GroupRuns.WithLabelValues(c.group.Name, failure).Inc()
+	}
+	metrics.ControllerRunsDuration.WithLabelValues(failure).Observe(c.lastDuration.Seconds())
+}
+
+// recordSuccess updates all statistic collection variables on success.
+// c.mutex must be held.
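+// A successful run also resets the consecutive-error counter tracked in the
+// controller statistics.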
+func (c *controller) recordSuccess(hr cell.HealthReporter) { + if hr != nil { + hr.OK(c.name) + } + + c.lastError = nil + c.lastSuccessStamp = time.Now() + c.successCount++ + c.consecutiveErrors = 0 + + metrics.ControllerRuns.WithLabelValues(success).Inc() + if isGroupMetricEnabled(c.group) { + GroupRuns.WithLabelValues(c.group.Name, success).Inc() + } + metrics.ControllerRunsDuration.WithLabelValues(success).Observe(c.lastDuration.Seconds()) +} diff --git a/vendor/github.com/cilium/cilium/pkg/controller/doc.go b/vendor/github.com/cilium/cilium/pkg/controller/doc.go new file mode 100644 index 0000000000..0bab327fd2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/controller/doc.go @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package controller provides a simple pattern for async operations that +// require retries and/or regular intervals. +package controller diff --git a/vendor/github.com/cilium/cilium/pkg/controller/logfields.go b/vendor/github.com/cilium/cilium/pkg/controller/logfields.go new file mode 100644 index 0000000000..ff14469251 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/controller/logfields.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package controller + +import ( + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" +) + +// logging field definitions +const ( + // fieldControllerName is the name of the controller + fieldControllerName = "name" + + // fieldUUID is the UUID of the controller + fieldUUID = "uuid" + + // fieldConsecutiveErrors is the number of consecutive errors of a controller + fieldConsecutiveErrors = "consecutiveErrors" +) + +var ( + // log is the controller package logger object. + log = logging.DefaultLogger.WithField(logfields.LogSubsys, "controller") +) diff --git a/vendor/github.com/cilium/cilium/pkg/controller/manager.go b/vendor/github.com/cilium/cilium/pkg/controller/manager.go new file mode 100644 index 0000000000..f7d42b305a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/controller/manager.go @@ -0,0 +1,374 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package controller + +import ( + "context" + "fmt" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + + "github.com/cilium/cilium/api/v1/models" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/time" +) + +var ( + // globalStatus is the global status of all controllers + globalStatus = NewManager() +) + +type controllerMap map[string]*managedController + +// Manager is a list of controllers +type Manager struct { + controllers controllerMap + mutex lock.RWMutex +} + +// NewManager allocates a new manager +func NewManager() *Manager { + return &Manager{ + controllers: controllerMap{}, + } +} + +// GetGlobalStatus returns the status of all controllers +func GetGlobalStatus() models.ControllerStatuses { + return globalStatus.GetStatusModel() +} + +// UpdateController installs or updates a controller in the +// manager. A controller is primarily identified by its name. +// If a controller with the name already exists, the controller +// will be shut down and replaced with the provided controller.
+// +// Updating a controller will cause the DoFunc to be run immediately regardless +// of any previous conditions. It will also cause any statistics to be reset. +func (m *Manager) UpdateController(name string, params ControllerParams) { + m.updateController(name, params) +} + +func (m *Manager) updateController(name string, params ControllerParams) *managedController { + start := time.Now() + + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.controllers == nil { + m.controllers = controllerMap{} + } + + if params.Group.Name == "" { + log.Errorf( + "Controller initialized with unpopulated group information. " + + "Metrics will not be exported for this controller.") + } + + ctrl, exists := m.controllers[name] + if exists { + ctrl.getLogger().Debug("Updating existing controller") + ctrl.updateParamsLocked(params) + + // Notify the goroutine of the params update. + select { + case ctrl.update <- ctrl.params: + default: + } + + ctrl.getLogger().Debug("Controller update time: ", time.Since(start)) + } else { + return m.createControllerLocked(name, params) + } + + return ctrl +} + +func (m *Manager) createControllerLocked(name string, params ControllerParams) *managedController { + ctrl := &managedController{ + controller: controller{ + name: name, + group: params.Group, + uuid: uuid.New().String(), + stop: make(chan struct{}), + update: make(chan ControllerParams, 1), + trigger: make(chan struct{}, 1), + terminated: make(chan struct{}), + }, + } + ctrl.updateParamsLocked(params) + ctrl.getLogger().Debug("Starting new controller") + + m.controllers[ctrl.name] = ctrl + + globalStatus.mutex.Lock() + globalStatus.controllers[ctrl.uuid] = ctrl + globalStatus.mutex.Unlock() + + go ctrl.runController(ctrl.params) + return ctrl +} + +// CreateController installs a new controller in the +// manager. If a controller with the name already exists +// this method returns false without triggering, otherwise +// creates the controller and runs it immediately. +func (m *Manager) CreateController(name string, params ControllerParams) bool { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.controllers != nil { + if _, exists := m.controllers[name]; exists { + return false + } + } else { + m.controllers = controllerMap{} + } + m.createControllerLocked(name, params) + return true +} + +func (m *Manager) removeController(ctrl *managedController) { + ctrl.stopController() + delete(m.controllers, ctrl.name) + + globalStatus.mutex.Lock() + delete(globalStatus.controllers, ctrl.uuid) + globalStatus.mutex.Unlock() + + ctrl.getLogger().Debug("Removed controller") +} + +func (m *Manager) lookup(name string) *managedController { + m.mutex.RLock() + defer m.mutex.RUnlock() + + if c, ok := m.controllers[name]; ok { + return c + } + + return nil +} + +func (m *Manager) removeAndReturnController(name string) (*managedController, error) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.controllers == nil { + return nil, fmt.Errorf("empty controller map") + } + + oldCtrl, ok := m.controllers[name] + if !ok { + return nil, fmt.Errorf("unable to find controller %s", name) + } + + m.removeController(oldCtrl) + + return oldCtrl, nil +} + +// RemoveController stops and removes a controller from the manager. If DoFunc +// is currently running, DoFunc is allowed to complete in the background. 
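As an aside for reviewers of this vendored package: a minimal sketch of how a caller typically drives this manager API. It is not part of the patch; the controller name, group, and intervals are hypothetical, and it assumes Group exposes a Name field as the metrics code above suggests.

```go
package main

import (
	"context"
	"time"

	"github.com/cilium/cilium/pkg/controller"
)

func main() {
	mgr := controller.NewManager()

	// Install (or replace) a named controller; DoFunc runs immediately.
	mgr.UpdateController("example-sync", controller.ControllerParams{
		Group:       controller.Group{Name: "example"},
		RunInterval: 5 * time.Minute, // rerun periodically between updates/triggers
		DoFunc: func(ctx context.Context) error {
			// Returning an error schedules a retry; the retry interval
			// grows linearly with the consecutive-error count, as shown above.
			return nil
		},
		ErrorRetryBaseDuration: time.Second,
	})

	// Later: stop the controller and wait for its goroutine to terminate.
	_ = mgr.RemoveControllerAndWait("example-sync")
}
```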
+func (m *Manager) RemoveController(name string) error { + _, err := m.removeAndReturnController(name) + return err +} + +// RemoveControllerAndWait stops and removes a controller using +// RemoveController() and then waits for it to run to completion. +func (m *Manager) RemoveControllerAndWait(name string) error { + oldCtrl, err := m.removeAndReturnController(name) + if err == nil { + <-oldCtrl.terminated + } + + return err +} + +func (m *Manager) removeAll() []*managedController { + ctrls := []*managedController{} + + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.controllers == nil { + return ctrls + } + + for _, ctrl := range m.controllers { + m.removeController(ctrl) + ctrls = append(ctrls, ctrl) + } + + return ctrls +} + +// RemoveAll stops and removes all controllers of the manager +func (m *Manager) RemoveAll() { + m.removeAll() +} + +// RemoveAllAndWait stops and removes all controllers of the manager and then +// waits for all controllers to exit +func (m *Manager) RemoveAllAndWait() { + ctrls := m.removeAll() + for _, ctrl := range ctrls { + <-ctrl.terminated + } +} + +// GetStatusModel returns the status of all controllers as models.ControllerStatuses +func (m *Manager) GetStatusModel() models.ControllerStatuses { + // Create a copy of pointers to the current controllers so we can unlock the + // manager mutex quickly again + controllers := controllerMap{} + m.mutex.RLock() + for key, c := range m.controllers { + controllers[key] = c + } + m.mutex.RUnlock() + + statuses := models.ControllerStatuses{} + for _, c := range controllers { + statuses = append(statuses, c.GetStatusModel()) + } + + return statuses +} + +// TriggerController triggers the controller with the specified name. +func (m *Manager) TriggerController(name string) { + ctrl := m.lookup(name) + if ctrl == nil { + return + } + + select { + case ctrl.trigger <- struct{}{}: + default: + } +} + +// FakeManager returns a fake controller manager with the specified number of +// failing controllers. The returned manager is identical in every regard except +// for internal pointers. +// Used for testing only. +func FakeManager(failingControllers int) *Manager { + m := &Manager{ + controllers: controllerMap{}, + } + + for i := 0; i < failingControllers; i++ { + ctrl := &managedController{ + controller: controller{ + name: fmt.Sprintf("controller-%d", i), + uuid: fmt.Sprintf("%d", i), + stop: make(chan struct{}), + update: make(chan ControllerParams, 1), + trigger: make(chan struct{}, 1), + terminated: make(chan struct{}), + lastError: fmt.Errorf("controller failed"), + failureCount: 1, + consecutiveErrors: 1, + }, + } + + ctrl.params.Context, ctrl.cancelDoFunc = context.WithCancel(context.Background()) + m.controllers[ctrl.name] = ctrl + } + + return m +} + +type managedController struct { + controller + + params ControllerParams + cancelDoFunc context.CancelFunc +} + +// updateParamsLocked sanitizes and sets the controller's parameters. +// +// If the RunInterval exceeds ControllerMaxInterval, it will be capped.
+// +// Manager's mutex must be held +func (c *managedController) updateParamsLocked(params ControllerParams) { + // ensure the callbacks are valid + if params.DoFunc == nil { + params.DoFunc = func(ctx context.Context) error { + return undefinedDoFunc(c.name) + } + } + if params.StopFunc == nil { + params.StopFunc = NoopFunc + } + + // Enforce max controller interval + maxInterval := time.Duration(option.Config.MaxControllerInterval) * time.Second + if maxInterval > 0 && params.RunInterval > maxInterval { + c.getLogger().Infof("Limiting interval to %s", maxInterval) + params.RunInterval = maxInterval + } + + // Save current context on update if not canceling + ctx := c.params.Context + // Check if the current context needs to be cancelled + if c.params.CancelDoFuncOnUpdate && c.cancelDoFunc != nil { + c.cancelDoFunc() + c.params.Context = nil + } + + // (re)set the context as the previous might have been cancelled + if c.params.Context == nil { + if params.Context == nil { + ctx, c.cancelDoFunc = context.WithCancel(context.Background()) + } else { + ctx, c.cancelDoFunc = context.WithCancel(params.Context) + } + } + + c.params = params + c.params.Context = ctx +} + +func (c *managedController) stopController() { + if c.cancelDoFunc != nil { + c.cancelDoFunc() + } + + close(c.stop) +} + +// GetStatusModel returns a models.ControllerStatus representing the +// controller's configuration & status +func (c *managedController) GetStatusModel() *models.ControllerStatus { + c.mutex.RLock() + defer c.mutex.RUnlock() + + status := &models.ControllerStatus{ + Name: c.name, + UUID: strfmt.UUID(c.uuid), + Configuration: &models.ControllerStatusConfiguration{ + ErrorRetry: !c.params.NoErrorRetry, + ErrorRetryBase: strfmt.Duration(c.params.ErrorRetryBaseDuration), + Interval: strfmt.Duration(c.params.RunInterval), + }, + Status: &models.ControllerStatusStatus{ + SuccessCount: int64(c.successCount), + LastSuccessTimestamp: strfmt.DateTime(c.lastSuccessStamp), + FailureCount: int64(c.failureCount), + LastFailureTimestamp: strfmt.DateTime(c.lastErrorStamp), + ConsecutiveFailureCount: int64(c.consecutiveErrors), + }, + } + + if c.lastError != nil { + status.Status.LastFailureMsg = c.lastError.Error() + } + + return status +} diff --git a/vendor/github.com/cilium/cilium/pkg/counter/counter.go b/vendor/github.com/cilium/cilium/pkg/counter/counter.go new file mode 100644 index 0000000000..16cc30acfa --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/counter/counter.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package counter + +// Counter tracks references for comparable keys. +// +// No thread safety is provided within this structure, the user is expected to +// handle concurrent access to this structure if it is used from multiple +// threads. +type Counter[T comparable] map[T]int + +// Add increments the reference count for the specified key. +func (c Counter[T]) Add(key T) bool { + value, exists := c[key] + c[key] = value + 1 + return !exists +} + +// Delete decrements the reference count for the specified key. +func (c Counter[T]) Delete(key T) bool { + value := c[key] + if value <= 1 { + delete(c, key) + return true + } + c[key] = value - 1 + return false +} + +// DeepCopy makes a new copy of the received Counter. +func (c Counter[T]) DeepCopy() Counter[T] { + result := make(Counter[T], len(c)) + for k, v := range c { + result[k] = v + } + return result +} + +// Deprecated. Use Counter[string] instead.
+type StringCounter = Counter[string] diff --git a/vendor/github.com/cilium/cilium/pkg/counter/doc.go b/vendor/github.com/cilium/cilium/pkg/counter/doc.go new file mode 100644 index 0000000000..f1c5ff1533 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/counter/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package counter provides generic reference counter objects +package counter diff --git a/vendor/github.com/cilium/cilium/pkg/counter/integer.go b/vendor/github.com/cilium/cilium/pkg/counter/integer.go new file mode 100644 index 0000000000..91f5fe78e6 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/counter/integer.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package counter + +import ( + "sort" +) + +// IntCounter tracks references for integers with an optional limiter. +// +// No thread safety is provided within this structure, the user is expected to +// handle concurrent access to this structure if it is used from multiple +// threads. +type IntCounter Counter[int] + +// Add increments the reference count for the specified integer key. +func (i IntCounter) Add(key int) (changed bool) { + return Counter[int](i).Add(key) +} + +// Delete decrements the reference count for the specified integer key. +func (i IntCounter) Delete(key int) bool { + return Counter[int](i).Delete(key) +} + +// DeepCopy makes a new copy of the received IntCounter. +func (i IntCounter) DeepCopy() IntCounter { + return IntCounter(Counter[int](i).DeepCopy()) +} + +// ToBPFData returns the keys as a slice, sorted from high to low. +func (i IntCounter) ToBPFData() []int { + result := make([]int, 0, len(i)) + for key := range i { + result = append(result, key) + } + sort.Sort(sort.Reverse(sort.IntSlice(result))) + return result +} diff --git a/vendor/github.com/cilium/cilium/pkg/counter/prefixes.go b/vendor/github.com/cilium/cilium/pkg/counter/prefixes.go new file mode 100644 index 0000000000..6c883c6bb5 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/counter/prefixes.go @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package counter + +import ( + "fmt" + "net" + "net/netip" + + "github.com/cilium/cilium/pkg/lock" +) + +// PrefixLengthCounter tracks references to prefix lengths, limited by the +// maxUniquePrefixes count. Neither of the IPv4 or IPv6 counters nested within +// may contain more keys than the specified maximum number of unique prefixes. +type PrefixLengthCounter struct { + lock.RWMutex + + v4 IntCounter + v6 IntCounter + + maxUniquePrefixes4 int + maxUniquePrefixes6 int +}
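The reference-counting contract in counter.go above is easy to misread, so here is a small illustrative sketch (not part of the patch): Add reports whether the key is new, and Delete reports whether the last reference was dropped.

```go
package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/counter"
)

func main() {
	refs := make(counter.Counter[string])

	fmt.Println(refs.Add("node-1")) // true: first reference for this key
	fmt.Println(refs.Add("node-1")) // false: key was already tracked

	fmt.Println(refs.Delete("node-1")) // false: one reference still remains
	fmt.Println(refs.Delete("node-1")) // true: last reference removed, key deleted
}
```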
+ +// NewPrefixLengthCounter returns a new PrefixLengthCounter which limits +// insertions to the specified maximum number of unique prefix lengths. +func NewPrefixLengthCounter(maxUniquePrefixes6, maxUniquePrefixes4 int) *PrefixLengthCounter { + return &PrefixLengthCounter{ + v4: make(IntCounter), + v6: make(IntCounter), + maxUniquePrefixes4: maxUniquePrefixes4, + maxUniquePrefixes6: maxUniquePrefixes6, + } +} + +func createIPNet(ones, bits int) netip.Prefix { + var addr netip.Addr + switch bits { + case net.IPv4len * 8: + addr = netip.IPv4Unspecified() + case net.IPv6len * 8: + addr = netip.IPv6Unspecified() + default: + // fall through to default library error + } + return netip.PrefixFrom(addr, ones) +} + +// DefaultPrefixLengthCounter creates a default prefix length counter that +// already counts the minimum and maximum prefix lengths for IP hosts and +// default routes (i.e., /32 and /0). As with NewPrefixLengthCounter, insertions +// are limited to the specified maximum number of unique prefix lengths. +func DefaultPrefixLengthCounter() *PrefixLengthCounter { + maxIPv4 := net.IPv4len*8 + 1 + maxIPv6 := net.IPv6len*8 + 1 + counter := NewPrefixLengthCounter(maxIPv6, maxIPv4) + + defaultPrefixes := []netip.Prefix{ + // IPv4 + createIPNet(0, net.IPv4len*8), // world + createIPNet(net.IPv4len*8, net.IPv4len*8), // hosts + + // IPv6 + createIPNet(0, net.IPv6len*8), // world + createIPNet(net.IPv6len*8, net.IPv6len*8), // hosts + } + if _, err := counter.Add(defaultPrefixes); err != nil { + panic(fmt.Errorf("Failed to create default prefix lengths: %s", err)) + } + + return counter +} + +// checkLimits checks whether the specified new count of prefixes would exceed +// the specified limit on the maximum number of unique keys, and returns an +// error if it would exceed the limit. +func checkLimits(current, newCount, max int) error { + if newCount > max { + return fmt.Errorf("adding specified prefixes would result in too many prefix lengths (current: %d, result: %d, max: %d)", + current, newCount, max) + } + return nil +} + +// Add increments references to prefix lengths for the specified IPNets to the +// counter. If the maximum number of unique prefix lengths would be exceeded, +// returns an error. +// +// Returns true if adding these prefixes results in an increase in the total +// number of unique prefix lengths in the counter. +func (p *PrefixLengthCounter) Add(prefixes []netip.Prefix) (bool, error) { + p.Lock() + defer p.Unlock() + + // Assemble a map of references that need to be added + newV4Counter := p.v4.DeepCopy() + newV6Counter := p.v6.DeepCopy() + newV4Prefixes := false + newV6Prefixes := false + for _, prefix := range prefixes { + ones := prefix.Bits() + bits := prefix.Addr().BitLen() + + switch bits { + case net.IPv4len * 8: + if newV4Counter.Add(ones) { + newV4Prefixes = true + } + case net.IPv6len * 8: + if newV6Counter.Add(ones) { + newV6Prefixes = true + } + default: + return false, fmt.Errorf("unsupported IPAddr bitlength %d", bits) + } + } + + // Check if they can be added given the limit in place + if newV4Prefixes { + if err := checkLimits(len(p.v4), len(newV4Counter), p.maxUniquePrefixes4); err != nil { + return false, err + } + } + if newV6Prefixes { + if err := checkLimits(len(p.v6), len(newV6Counter), p.maxUniquePrefixes6); err != nil { + return false, err + } + } + + // Set and return whether anything changed + p.v4 = newV4Counter + p.v6 = newV6Counter + return newV4Prefixes || newV6Prefixes, nil +} + +// Delete reduces references to prefix lengths in the specified IPNets from +// the counter.
Returns true if removing references to these prefix lengths +// would result in a decrease in the total number of unique prefix lengths in +// the counter. +func (p *PrefixLengthCounter) Delete(prefixes []netip.Prefix) (changed bool) { + p.Lock() + defer p.Unlock() + + for _, prefix := range prefixes { + ones := prefix.Bits() + bits := prefix.Addr().BitLen() + switch bits { + case net.IPv4len * 8: + if p.v4.Delete(ones) { + changed = true + } + case net.IPv6len * 8: + if p.v6.Delete(ones) { + changed = true + } + } + } + + return changed +} + +// ToBPFData converts the counter into a set of prefix lengths that the BPF +// datapath can use for LPM lookup. +func (p *PrefixLengthCounter) ToBPFData() (s6, s4 []int) { + p.RLock() + defer p.RUnlock() + + return p.v6.ToBPFData(), p.v4.ToBPFData() +} diff --git a/vendor/github.com/cilium/cilium/pkg/crypto/certificatemanager/certificate_manager.go b/vendor/github.com/cilium/cilium/pkg/crypto/certificatemanager/certificate_manager.go new file mode 100644 index 0000000000..e2378e6c13 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/crypto/certificatemanager/certificate_manager.go @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package certificatemanager + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/cilium/cilium/pkg/hive/cell" + k8sClient "github.com/cilium/cilium/pkg/k8s/client" + "github.com/cilium/cilium/pkg/policy/api" + + "github.com/spf13/pflag" +) + +var Cell = cell.Module( + "certificate-manager", + "Provides TLS certificates and secrets", + + cell.Provide(NewManager), + + cell.Config(defaultManagerConfig), +) + +type CertificateManager interface { + GetTLSContext(ctx context.Context, tlsCtx *api.TLSContext, ns string) (ca, public, private string, err error) +} + +type SecretManager interface { + GetSecrets(ctx context.Context, secret *api.Secret, ns string) (string, map[string][]byte, error) + GetSecretString(ctx context.Context, secret *api.Secret, ns string) (string, error) +} + +var defaultManagerConfig = managerConfig{ + CertificatesDirectory: "/var/run/cilium/certs", +} + +type managerConfig struct { + // CertificatesDirectory is the root directory to be used by cilium to find + // certificates locally. + CertificatesDirectory string +} + +func (mc managerConfig) Flags(flags *pflag.FlagSet) { + flags.String("certificates-directory", mc.CertificatesDirectory, "Root directory to find certificates specified in L7 TLS policy enforcement") +} + +// Manager will manage the way certificates are retrieved based on the given +// k8sClient and rootPath. +type manager struct { + rootPath string + k8sClient k8sClient.Clientset +} + +// NewManager returns a new manager. +func NewManager(cfg managerConfig, clientset k8sClient.Clientset) (CertificateManager, SecretManager) { + m := &manager{ + rootPath: cfg.CertificatesDirectory, + k8sClient: clientset, + } + + return m, m +}
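For review context, a hedged sketch of a caller going through the SecretManager interface declared earlier in this file; GetSecrets (below) reads the local certificate directory first and falls back to the Kubernetes API. The secret name, namespace, and the ctx/cfg/clientset wiring are hypothetical, not taken from this diff.

```go
// Sketch only: cfg and clientset are assumed to come from the surrounding hive cells.
_, secretMgr := certificatemanager.NewManager(cfg, clientset)

// name is "<namespace>/<name>"; data maps item names to their contents.
name, data, err := secretMgr.GetSecrets(ctx, &api.Secret{Name: "my-tls"}, "default")
if err != nil {
	return err // neither the local directory nor the k8s API had the secret
}
fmt.Printf("loaded %d items from %s\n", len(data), name)
```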
+ +// GetSecrets returns either local or k8s secrets, giving precedence to local secrets if configured. +// The 'ns' parameter is used as the secret namespace if 'secret.Namespace' is an empty string. +func (m *manager) GetSecrets(ctx context.Context, secret *api.Secret, ns string) (string, map[string][]byte, error) { + if secret == nil { + return "", nil, fmt.Errorf("Secret must not be nil") + } + + if secret.Namespace != "" { + ns = secret.Namespace + } + + if secret.Name == "" { + return ns, nil, fmt.Errorf("Missing Secret name") + } + nsName := filepath.Join(ns, secret.Name) + + // Give priority to local secrets. + // K8s API request is only done if the local secret directory can't be read! + certPath := filepath.Join(m.rootPath, nsName) + files, ioErr := os.ReadDir(certPath) + if ioErr == nil { + secrets := make(map[string][]byte, len(files)) + for _, file := range files { + var bytes []byte + + path := filepath.Join(certPath, file.Name()) + bytes, ioErr = os.ReadFile(path) + if ioErr == nil { + secrets[file.Name()] = bytes + } + } + // Return the (latest) error only if no secrets were found + if len(secrets) == 0 && ioErr != nil { + return nsName, nil, ioErr + } + return nsName, secrets, nil + } + secrets, err := m.k8sClient.GetSecrets(ctx, ns, secret.Name) + return nsName, secrets, err +} + +const ( + caDefaultName = "ca.crt" + publicDefaultName = "tls.crt" + privateDefaultName = "tls.key" +) + +// GetTLSContext returns the CA, public and private certificates found based +// on the given api.TLSContext. +func (m *manager) GetTLSContext(ctx context.Context, tlsCtx *api.TLSContext, ns string) (ca, public, private string, err error) { + name, secrets, err := m.GetSecrets(ctx, tlsCtx.Secret, ns) + if err != nil { + return "", "", "", err + } + + caName := caDefaultName + if tlsCtx.TrustedCA != "" { + caName = tlsCtx.TrustedCA + } + caBytes, ok := secrets[caName] + if ok { + ca = string(caBytes) + } else if tlsCtx.TrustedCA != "" { + return "", "", "", fmt.Errorf("Trusted CA %s not found in secret %s", caName, name) + } + + publicName := publicDefaultName + if tlsCtx.Certificate != "" { + publicName = tlsCtx.Certificate + } + publicBytes, ok := secrets[publicName] + if ok { + public = string(publicBytes) + } else if tlsCtx.Certificate != "" { + return "", "", "", fmt.Errorf("Certificate %s not found in secret %s", publicName, name) + } + + privateName := privateDefaultName + if tlsCtx.PrivateKey != "" { + privateName = tlsCtx.PrivateKey + } + privateBytes, ok := secrets[privateName] + if ok { + private = string(privateBytes) + } else if tlsCtx.PrivateKey != "" { + return "", "", "", fmt.Errorf("Private Key %s not found in secret %s", privateName, name) + } + + if caBytes == nil && publicBytes == nil && privateBytes == nil { + return "", "", "", fmt.Errorf("TLS certificates not found in secret %s ", name) + } + + return ca, public, private, nil +} + +// GetSecretString returns a secret string stored in a k8s secret +func (m *manager) GetSecretString(ctx context.Context, secret *api.Secret, ns string) (string, error) { + name, secrets, err := m.GetSecrets(ctx, secret, ns) + if err != nil { + return "", err + } + + if len(secrets) == 1 { + // get the lone item by looping over the map + for _, value := range secrets { + return string(value), nil + } + } + return "", fmt.Errorf("Secret %s must have exactly one item", name) +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/bandwidth.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/bandwidth.go new file mode 100644 index 0000000000..c91130e634 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/bandwidth.go @@ -0,0 +1,303 @@ +//
SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build linux + +// NOTE: We can only build on linux because we import bwmap which in turn imports pkg/ebpf and pkg/bpf +// which throw build errors when building on non-linux platforms. + +package bandwidth + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/cilium/cilium/pkg/datapath/linux/config/defines" + "github.com/cilium/cilium/pkg/datapath/linux/probes" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/maps/bwmap" + "github.com/cilium/cilium/pkg/sysctl" +) + +const ( + // EgressBandwidth is the K8s Pod annotation. + EgressBandwidth = "kubernetes.io/egress-bandwidth" + // IngressBandwidth is the K8s Pod annotation. + IngressBandwidth = "kubernetes.io/ingress-bandwidth" + + EnableBBR = "enable-bbr" + + // FqDefaultHorizon represents the maximum allowed departure + // time delta in the future. Given applications can set SO_TXTIME + // from user space, this is a limit to prevent buggy applications + // from filling the FQ qdisc. + FqDefaultHorizon = bwmap.DefaultDropHorizon + // FqDefaultBuckets is the default 32k (2^15) bucket limit for bwm. + // A too low bucket limit can cause scalability issues. + FqDefaultBuckets = 15 +) + +type manager struct { + resetQueues, enabled bool + + params bandwidthManagerParams +} + +func (m *manager) Enabled() bool { + return m.enabled +} + +func (m *manager) BBREnabled() bool { + return m.params.Config.EnableBBR +} + +func (m *manager) defines() (defines.Map, error) { + cDefinesMap := make(defines.Map) + if m.resetQueues { + cDefinesMap["RESET_QUEUES"] = "1" + } + + if m.Enabled() { + cDefinesMap["ENABLE_BANDWIDTH_MANAGER"] = "1" + cDefinesMap["THROTTLE_MAP"] = bwmap.MapName + cDefinesMap["THROTTLE_MAP_SIZE"] = fmt.Sprintf("%d", bwmap.MapSize) + } + + return cDefinesMap, nil +} + +func (m *manager) DeleteEndpointBandwidthLimit(epID uint16) error { + if m.enabled { + return bwmap.Delete(epID) + } + return nil +} + +func GetBytesPerSec(bandwidth string) (uint64, error) { + res, err := resource.ParseQuantity(bandwidth) + if err != nil { + return 0, err + } + return uint64(res.Value() / 8), err +} + +// probe checks the various system requirements of the bandwidth manager and disables it if they are +// not met. +func (m *manager) probe() error { + // We need at least a 5.1 kernel for native TCP EDT integration + // and writable queue_mapping that we use. Below helper is + // available for 5.1 kernels and onwards. + kernelGood := probes.HaveProgramHelper(ebpf.SchedCLS, asm.FnSkbEcnSetCe) == nil + m.resetQueues = kernelGood + if !m.params.Config.EnableBandwidthManager { + return nil + } + if _, err := sysctl.Read("net.core.default_qdisc"); err != nil { + m.params.Log.WithError(err).Warn("BPF bandwidth manager could not read procfs. Disabling the feature.") + return nil + } + if !kernelGood { + m.params.Log.Warn("BPF bandwidth manager needs kernel 5.1 or newer. Disabling the feature.") + return nil + } + if m.params.Config.EnableBBR { + // We need at least a 5.18 kernel for Pod-based BBR TCP congestion + // control since earlier kernels just clear the skb->tstamp upon + // netns traversal.
See also: + // + // - https://lpc.events/event/11/contributions/953/ + // - https://lore.kernel.org/bpf/20220302195519.3479274-1-kafai@fb.com/ + if probes.HaveProgramHelper(ebpf.SchedCLS, asm.FnSkbSetTstamp) != nil { + return fmt.Errorf("cannot enable --%s, needs kernel 5.18 or newer", EnableBBR) + } + } + + // Going via host stack will orphan skb->sk, so we do need BPF host + // routing for it to work properly. + if m.params.Config.EnableBBR && m.params.DaemonConfig.EnableHostLegacyRouting { + return fmt.Errorf("BPF bandwidth manager's BBR setup requires BPF host routing.") + } + + if m.params.Config.EnableBandwidthManager && m.params.DaemonConfig.EnableIPSec { + m.params.Log.Warning("The bandwidth manager cannot be used with IPSec. Disabling the bandwidth manager.") + return nil + } + + m.enabled = true + return nil +} + +func (m *manager) init() error { + // TODO make the bwmanager reactive by using the (currently ignored) watch channel, which is + // closed when there's new or changed devices. + devs := m.params.DaemonConfig.GetDevices() + if len(devs) == 0 { + m.params.Log.Warn("BPF bandwidth manager could not detect host devices. Disabling the feature.") + m.enabled = false + return nil + } + + m.params.Log.Info("Setting up BPF bandwidth manager") + + if err := bwmap.ThrottleMap().OpenOrCreate(); err != nil { + return fmt.Errorf("failed to access ThrottleMap: %w", err) + } + + if err := setBaselineSysctls(m.params); err != nil { + return fmt.Errorf("failed to set sysctl needed by BPF bandwidth manager: %w", err) + } + + // Pass 1: Set up Qdiscs. + for _, device := range devs { + link, err := netlink.LinkByName(device) + if err != nil { + m.params.Log.WithError(err).WithField("device", device).Warn("Link does not exist") + continue + } + // We strictly want to avoid a down/up cycle on the device at + // runtime, so given we've changed the default qdisc to FQ, we + // need to reset the root qdisc, and then set up MQ which will + // automatically get FQ leaf qdiscs (given it's been default). + qdisc := &netlink.GenericQdisc{ + QdiscAttrs: netlink.QdiscAttrs{ + LinkIndex: link.Attrs().Index, + Parent: netlink.HANDLE_ROOT, + }, + QdiscType: "noqueue", + } + if err := netlink.QdiscReplace(qdisc); err != nil { + return fmt.Errorf("cannot replace root Qdisc to %s on device %s: %w", qdisc.QdiscType, device, err) + } + qdisc = &netlink.GenericQdisc{ + QdiscAttrs: netlink.QdiscAttrs{ + LinkIndex: link.Attrs().Index, + Parent: netlink.HANDLE_ROOT, + }, + QdiscType: "mq", + } + which := "mq with fq leaves" + if err := netlink.QdiscReplace(qdisc); err != nil { + // No MQ support, so just replace to FQ directly. + fq := &netlink.Fq{ + QdiscAttrs: netlink.QdiscAttrs{ + LinkIndex: link.Attrs().Index, + Parent: netlink.HANDLE_ROOT, + }, + Pacing: 1, + } + // At this point there is nothing we can do about + // it if we fail here, so hard bail out. + if err = netlink.QdiscReplace(fq); err != nil { + return fmt.Errorf("cannot replace root Qdisc to %s on device %s: %w", fq.Type(), device, err) + } + which = "fq" + } + m.params.Log.WithField("device", device).Infof("Setting qdisc to %s", which) + } + + // Pass 2: Iterate over leaf qdiscs and tweak their parameters. 
+ for _, device := range devs { + link, err := netlink.LinkByName(device) + if err != nil { + m.params.Log.WithError(err).WithField("device", device).Warn("Link does not exist") + continue + } + qdiscs, err := netlink.QdiscList(link) + if err != nil { + m.params.Log.WithError(err).WithField("device", device).Warn("Cannot dump qdiscs") + continue + } + for _, qdisc := range qdiscs { + if qdisc.Type() == "fq" { + fq, _ := qdisc.(*netlink.Fq) + fq.Horizon = uint32(FqDefaultHorizon.Microseconds()) + fq.Buckets = uint32(FqDefaultBuckets) + if err := netlink.QdiscReplace(qdisc); err != nil { + m.params.Log.WithError(err).WithField("device", device).Warn("Cannot upgrade qdisc attributes") + } + } + } + } + return nil +} + +func setBaselineSysctls(p bandwidthManagerParams) error { + // Ensure integer type sysctls are no smaller than our baseline settings + baseIntSettings := map[string]int64{ + "net.core.netdev_max_backlog": 1000, + "net.core.somaxconn": 4096, + "net.ipv4.tcp_max_syn_backlog": 4096, + } + + for name, value := range baseIntSettings { + currentValue, err := sysctl.ReadInt(name) + if err != nil { + return fmt.Errorf("read sysctl %s failed: %s", name, err) + } + + scopedLog := p.Log.WithFields(logrus.Fields{ + logfields.SysParamName: name, + logfields.SysParamValue: currentValue, + "baselineValue": value, + }) + + if currentValue >= value { + scopedLog.Info("Skip setting sysctl as it already meets baseline") + continue + } + + scopedLog.Info("Setting sysctl to baseline for BPF bandwidth manager") + if err := sysctl.WriteInt(name, value); err != nil { + return fmt.Errorf("set sysctl %s=%d failed: %s", name, value, err) + } + } + + // Ensure string type sysctls + congctl := "cubic" + if p.Config.EnableBBR { + congctl = "bbr" + } + + baseStringSettings := map[string]string{ + "net.core.default_qdisc": "fq", + "net.ipv4.tcp_congestion_control": congctl, + } + + for name, value := range baseStringSettings { + p.Log.WithFields(logrus.Fields{ + logfields.SysParamName: name, + "baselineValue": value, + }).Info("Setting sysctl to baseline for BPF bandwidth manager") + + if err := sysctl.Write(name, value); err != nil { + return fmt.Errorf("set sysctl %s=%s failed: %s", name, value, err) + } + } + + // Extra settings + extraSettings := map[string]int64{ + "net.ipv4.tcp_slow_start_after_idle": 0, + } + + // A few extra knobs which can be turned on along with pacing. EnableBBR + // also provides the right kernel dependency implicitly as well. + if p.Config.EnableBBR { + for name, value := range extraSettings { + p.Log.WithFields(logrus.Fields{ + logfields.SysParamName: name, + "baselineValue": value, + }).Info("Setting sysctl to baseline for BPF bandwidth manager") + + if err := sysctl.WriteInt(name, value); err != nil { + return fmt.Errorf("set sysctl %s=%d failed: %s", name, value, err) + } + } + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/cell.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/cell.go new file mode 100644 index 0000000000..b572519439 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/cell.go @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build linux + +// NOTE: We can only build on linux because we import bwmap which in turn imports pkg/ebpf and pkg/bpf +// which throw build errors when building on non-linux platforms.
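One arithmetic note on GetBytesPerSec in bandwidth.go above: the annotation value is a Kubernetes resource quantity expressed in bits per second, and the function divides by 8 to yield bytes per second. A quick sketch (not part of the patch; linux-only, as the build constraint above requires):

```go
package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/datapath/linux/bandwidth"
)

func main() {
	// "10M" parses as 10,000,000 bits/s, so the result is 1,250,000 bytes/s.
	bps, err := bandwidth.GetBytesPerSec("10M")
	if err != nil {
		panic(err)
	}
	fmt.Println(bps) // 1250000
}
```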
+ +package bandwidth + +import ( + "github.com/sirupsen/logrus" + "github.com/spf13/pflag" + + "github.com/cilium/cilium/pkg/datapath/linux/config/defines" + "github.com/cilium/cilium/pkg/hive/cell" + "github.com/cilium/cilium/pkg/option" +) + +var Cell = cell.Module( + "bandwidth-manager", + "Linux Bandwidth Manager for EDT-based pacing", + + cell.Config(Config{false, false}), + cell.Provide(newBandwidthManager), +) + +type Config struct { + // EnableBandwidthManager enables EDT-based pacing + EnableBandwidthManager bool + + // EnableBBR enables BBR TCP congestion control for the node including Pods + EnableBBR bool +} + +func (def Config) Flags(flags *pflag.FlagSet) { + flags.Bool("enable-bandwidth-manager", def.EnableBandwidthManager, "Enable BPF bandwidth manager") + flags.Bool(EnableBBR, def.EnableBBR, "Enable BBR for the bandwidth manager") +} + +func newBandwidthManager(lc cell.Lifecycle, p bandwidthManagerParams) (Manager, defines.NodeFnOut) { + m := &manager{params: p} + + if !option.Config.DryMode { + lc.Append(m) + } + + return m, defines.NewNodeFnOut(m.defines) +} + +func (m *manager) Start(cell.HookContext) error { + err := m.probe() + if err != nil { + return err + } else if !m.enabled { + return nil + } + + return m.init() +} + +func (*manager) Stop(cell.HookContext) error { + return nil +} + +type bandwidthManagerParams struct { + cell.In + + Log logrus.FieldLogger + Config Config + DaemonConfig *option.DaemonConfig +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/doc.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/doc.go new file mode 100644 index 0000000000..6ad693fa9c --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package bandwidth provides efficient EDT-based rate-limiting. +package bandwidth diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/types.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/types.go new file mode 100644 index 0000000000..89b6e5a1ee --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/bandwidth/types.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package bandwidth + +type Manager interface { + BBREnabled() bool + DeleteEndpointBandwidthLimit(epID uint16) error + Enabled() bool +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/config/defines/defines.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/config/defines/defines.go new file mode 100644 index 0000000000..edaafe1067 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/config/defines/defines.go @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package defines + +import ( + "fmt" + + "github.com/cilium/cilium/pkg/hive/cell" +) + +// Map is the type containing the key-value pairs representing extra define +// directives for datapath node configuration. +type Map map[string]string + +func (m Map) Merge(other Map) error { + for key, value := range other { + if _, ok := m[key]; ok { + return fmt.Errorf("extra node define overwrites key %q", key) + } + + m[key] = value + } + return nil +} + +// NodeOut allows injecting configuration into the datapath. 
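A brief hedged sketch of the Merge contract defined just above: keys must be disjoint across contributors, and collisions fail loudly instead of overwriting. The define names are borrowed from the bandwidth manager earlier in this patch purely for illustration.

```go
package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/datapath/linux/config/defines"
)

func main() {
	m := defines.Map{"ENABLE_BANDWIDTH_MANAGER": "1"}

	// Disjoint keys merge in place without error.
	if err := m.Merge(defines.Map{"THROTTLE_MAP_SIZE": "16384"}); err != nil {
		panic(err)
	}

	// Redefining an existing key is rejected rather than silently overwritten.
	err := m.Merge(defines.Map{"ENABLE_BANDWIDTH_MANAGER": "0"})
	fmt.Println(err) // extra node define overwrites key "ENABLE_BANDWIDTH_MANAGER"
}
```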
+type NodeOut struct { + cell.Out + NodeDefines Map `group:"header-node-defines"` +} + +// Fn is a function returning the key-value pairs representing extra define +// directives for datapath node configuration. +type Fn func() (Map, error) + +// NodeFnOut allows injecting configuration into the datapath +// by invoking a callback. +// +// Prefer using [NodeOut] if possible since it has a valid zero value. +type NodeFnOut struct { + cell.Out + // Fn must not be nil. + Fn `group:"header-node-define-fns"` +} + +// NewNodeFnOut wraps a function returning the key-value pairs representing +// extra define directives for datapath node configuration, so that it can be +// provided through the hive framework. +func NewNodeFnOut(fn Fn) NodeFnOut { + return NodeFnOut{Fn: fn} +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/loader/metrics/metrics.go b/vendor/github.com/cilium/cilium/pkg/datapath/loader/metrics/metrics.go new file mode 100644 index 0000000000..4896dc830b --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/loader/metrics/metrics.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package metrics + +import ( + "github.com/cilium/cilium/pkg/spanstat" +) + +// SpanStat is a statistics structure for storing metrics related to datapath +// load operations. +type SpanStat struct { + BpfCompilation spanstat.SpanStat + BpfWaitForELF spanstat.SpanStat + BpfWriteELF spanstat.SpanStat + BpfLoadProg spanstat.SpanStat +} + +// GetMap returns a map of statistic names to stats +func (s *SpanStat) GetMap() map[string]*spanstat.SpanStat { + return map[string]*spanstat.SpanStat{ + "bpfCompilation": &s.BpfCompilation, + "bpfWaitForELF": &s.BpfWaitForELF, + "bpfWriteELF": &s.BpfWriteELF, + "bpfLoadProg": &s.BpfLoadProg, + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/tunnel/cell.go b/vendor/github.com/cilium/cilium/pkg/datapath/tunnel/cell.go new file mode 100644 index 0000000000..9150b8c68b --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/tunnel/cell.go @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package tunnel + +import ( + "github.com/cilium/cilium/pkg/defaults" + "github.com/cilium/cilium/pkg/hive/cell" + "github.com/cilium/cilium/pkg/option" +) + +// Cell is a cell that provides the parameters for the Cilium tunnel, +// based on user configuration and requests from external modules. +var Cell = cell.Module( + "datapath-tunnel-config", + "Tunneling configurations", + + cell.Config(userCfg{TunnelProtocol: defaults.TunnelProtocol}), + + cell.Provide( + newConfig, + + // Provide the datapath options. + Config.datapathConfigProvider, + + // Enable tunnel configuration when it is the primary routing mode. + func(dcfg *option.DaemonConfig) EnablerOut { + return NewEnabler(dcfg.TunnelingEnabled()) + }, + + // Enable tunnel configuration when DSR Geneve is enabled (this is currently + // handled here, as the corresponding logic has not yet been modularized). + func(dcfg *option.DaemonConfig) EnablerOut { + return NewEnabler( + (dcfg.EnableNodePort || + dcfg.KubeProxyReplacement == option.KubeProxyReplacementStrict || + dcfg.KubeProxyReplacement == option.KubeProxyReplacementTrue) && + dcfg.LoadBalancerUsesDSR() && + dcfg.LoadBalancerDSRDispatch == option.DSRDispatchGeneve, + // The datapath logic takes care of the MTU overhead. So no need to + // take it into account here. 
+ // See encap_geneve_dsr_opt[4,6] in nodeport.h + WithoutMTUAdaptation(), + ) + }, + + // Enable tunnel configuration when High Scale IPCache is enabled (this is + // currently handled here, as the corresponding logic has not yet been modularized). + func(dcfg *option.DaemonConfig) EnablerOut { + return NewEnabler(dcfg.EnableHighScaleIPcache) + }, + ), +) diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/tunnel/tunnel.go b/vendor/github.com/cilium/cilium/pkg/datapath/tunnel/tunnel.go new file mode 100644 index 0000000000..ba83191398 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/tunnel/tunnel.go @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package tunnel + +import ( + "fmt" + "strings" + + "github.com/spf13/pflag" + "github.com/vishvananda/netlink" + + dpcfgdef "github.com/cilium/cilium/pkg/datapath/linux/config/defines" + "github.com/cilium/cilium/pkg/defaults" + "github.com/cilium/cilium/pkg/hive/cell" +) + +// Protocol represents the valid types of encapsulation protocols. +type Protocol string + +const ( + // VXLAN specifies VXLAN encapsulation + VXLAN Protocol = "vxlan" + + // Geneve specifies Geneve encapsulation + Geneve Protocol = "geneve" + + // Disabled specifies to disable encapsulation + Disabled Protocol = "" +) + +func (tp Protocol) String() string { return string(tp) } + +func (tp Protocol) toDpID() string { + switch tp { + case VXLAN: + return "1" + case Geneve: + return "2" + default: + return "" + } +} + +// Config represents the materialized tunneling configuration to be used, +// depending on the user configuration and optional overrides required by +// additional features. +type Config struct { + protocol Protocol + port uint16 + deviceName string + shouldAdaptMTU bool +} + +type newConfigIn struct { + cell.In + + Cfg userCfg + Enablers []enabler `group:"request-enable-tunneling"` +} + +func newConfig(in newConfigIn) (Config, error) { + switch Protocol(in.Cfg.TunnelProtocol) { + case VXLAN, Geneve: + default: + return Config{}, fmt.Errorf("invalid tunnel protocol %q", in.Cfg.TunnelProtocol) + } + + cfg := Config{ + protocol: Protocol(in.Cfg.TunnelProtocol), + port: in.Cfg.TunnelPort, + } + + var enabled bool + for _, e := range in.Enablers { + if e.enable { + enabled = true + cfg.shouldAdaptMTU = cfg.shouldAdaptMTU || e.needsMTUAdaptation + + for _, validator := range e.validators { + if err := validator(cfg.protocol); err != nil { + return Config{}, err + } + } + } + } + + if !enabled { + return Config{protocol: Disabled}, nil + } + + switch cfg.protocol { + case VXLAN: + cfg.deviceName = defaults.VxlanDevice + + if cfg.port == 0 { + cfg.port = defaults.TunnelPortVXLAN + } + case Geneve: + cfg.deviceName = defaults.GeneveDevice + + if cfg.port == 0 { + cfg.port = defaults.TunnelPortGeneve + } + } + + return cfg, nil +} + +// NewTestConfig returns a new TunnelConfig for testing purposes. +func NewTestConfig(proto Protocol) Config { + cfg := Config{protocol: proto} + + switch proto { + case VXLAN: + cfg.port = defaults.TunnelPortVXLAN + cfg.deviceName = defaults.VxlanDevice + case Geneve: + cfg.port = defaults.TunnelPortGeneve + cfg.deviceName = defaults.GeneveDevice + } + + return cfg +} + +// Protocol returns the enabled tunnel protocol. 
The tunnel protocol may be +// set to either VXLAN or Geneve even when the primary mode is native routing, in +// case an additional feature (e.g., egress gateway) may request some traffic to +// be routed through a tunnel. +func (cfg Config) Protocol() Protocol { return cfg.protocol } + +// Port returns the port used by the tunnel (0 if disabled). +func (cfg Config) Port() uint16 { return cfg.port } + +// DeviceName returns the name of the tunnel device (empty if disabled). +func (cfg Config) DeviceName() string { return cfg.deviceName } + +// ShouldAdaptMTU returns whether we should adapt the MTU calculation to +// account for encapsulation. +func (cfg Config) ShouldAdaptMTU() bool { return cfg.shouldAdaptMTU } + +func (cfg Config) datapathConfigProvider() (dpcfgdef.NodeOut, dpcfgdef.NodeFnOut) { + defines := make(dpcfgdef.Map) + definesFn := func() (dpcfgdef.Map, error) { return nil, nil } + + if cfg.Protocol() != Disabled { + defines[fmt.Sprintf("TUNNEL_PROTOCOL_%s", strings.ToUpper(VXLAN.String()))] = VXLAN.toDpID() + defines[fmt.Sprintf("TUNNEL_PROTOCOL_%s", strings.ToUpper(Geneve.String()))] = Geneve.toDpID() + defines["TUNNEL_PROTOCOL"] = cfg.Protocol().toDpID() + defines["TUNNEL_PORT"] = fmt.Sprintf("%d", cfg.Port()) + + definesFn = func() (dpcfgdef.Map, error) { + tunnelDev, err := netlink.LinkByName(cfg.DeviceName()) + if err != nil { + return nil, fmt.Errorf("failed to retrieve device info for %q: %w", cfg.DeviceName(), err) + } + + return dpcfgdef.Map{ + "ENCAP_IFINDEX": fmt.Sprintf("%d", tunnelDev.Attrs().Index), + }, nil + } + } + + return dpcfgdef.NodeOut{NodeDefines: defines}, dpcfgdef.NewNodeFnOut(definesFn) +} + +// EnablerOut allows requesting to enable tunneling functionalities. +type EnablerOut struct { + cell.Out + Enabler enabler `group:"request-enable-tunneling"` +} + +// NewEnabler returns an object to be injected through hive to request to +// enable tunneling functionalities. Extra options are meaningful only when +// enable is set to true, and are ignored otherwise. +func NewEnabler(enable bool, opts ...enablerOpt) EnablerOut { + enabler := enabler{enable: enable, needsMTUAdaptation: enable} + + for _, opt := range opts { + opt(&enabler) + } + + return EnablerOut{Enabler: enabler} +} + +// WithValidator allows registering extra validation functions +// to assert that the configured tunnel protocol matches the one expected by +// the given feature. +func WithValidator(validator func(Protocol) error) enablerOpt { + return func(te *enabler) { + te.validators = append(te.validators, validator) + } +} + +// WithoutMTUAdaptation conveys that the given feature requests +// to enable tunneling, but MTU adaptation is already handled externally. +func WithoutMTUAdaptation() enablerOpt { + return func(te *enabler) { + te.needsMTUAdaptation = false + } +} + +type enabler struct { + enable bool + needsMTUAdaptation bool + validators []func(Protocol) error +} + +type enablerOpt func(*enabler) + +// userCfg wraps the tunnel-related user configurations. +type userCfg struct { + TunnelProtocol string + TunnelPort uint16 +} + +// Flags implements the cell.Flagger interface, to register the given flags.
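To make the enabler/validator pattern above concrete, a hedged sketch of a feature module requesting tunneling while insisting on Geneve; the surrounding hive wiring is assumed and not shown in this diff.

```go
// Fragment of a hypothetical feature cell.
cell.Provide(func() tunnel.EnablerOut {
	return tunnel.NewEnabler(true,
		// Reject configurations where the agreed protocol is not Geneve.
		tunnel.WithValidator(func(p tunnel.Protocol) error {
			if p != tunnel.Geneve {
				return fmt.Errorf("feature requires geneve, got %q", p)
			}
			return nil
		}),
	)
})
```

A module that accounts for encapsulation overhead itself would additionally pass WithoutMTUAdaptation(), as the DSR Geneve enabler in cell.go above does.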
+func (def userCfg) Flags(flags *pflag.FlagSet) { + flags.String("tunnel-protocol", def.TunnelProtocol, "Encapsulation protocol to use for the overlay (\"vxlan\" or \"geneve\")") + flags.Uint16("tunnel-port", def.TunnelPort, fmt.Sprintf("Tunnel port (default %d for \"vxlan\" and %d for \"geneve\")", defaults.TunnelPortVXLAN, defaults.TunnelPortGeneve)) +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/config.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/config.go new file mode 100644 index 0000000000..02da185572 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/config.go @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import ( + "io" + "net/netip" + + "github.com/cilium/cilium/pkg/cidr" + "github.com/cilium/cilium/pkg/identity" + "github.com/cilium/cilium/pkg/mac" + "github.com/cilium/cilium/pkg/node" + "github.com/cilium/cilium/pkg/option" +) + +// DeviceConfiguration is an interface for injecting configuration of datapath +// options that affect lookups and logic applied at a per-device level, whether +// those are devices associated with the endpoint or associated with the host. +type DeviceConfiguration interface { + // GetOptions fetches the configurable datapath options from the owner. + GetOptions() *option.IntOptions +} + +// LoadTimeConfiguration provides datapath implementations a clean interface +// to access endpoint-specific configuration that can be changed at load time. +type LoadTimeConfiguration interface { + // GetID returns a locally-significant endpoint identification number. + GetID() uint64 + // StringID returns the string-formatted version of the ID from GetID(). + StringID() string + // GetIdentity returns a globally-significant numeric security identity. + GetIdentity() identity.NumericIdentity + + // GetIdentityLocked returns a globally-significant numeric security + // identity while assuming that the backing data structure is locked. + // This function should be removed in favour of GetIdentity() + GetIdentityLocked() identity.NumericIdentity + + IPv4Address() netip.Addr + IPv6Address() netip.Addr + GetNodeMAC() mac.MAC +} + +// CompileTimeConfiguration provides datapath implementations a clean interface +// to access endpoint-specific configuration that can only be changed at +// compile time. +type CompileTimeConfiguration interface { + DeviceConfiguration + + // TODO: Move this detail into the datapath + ConntrackLocalLocked() bool + + // RequireARPPassthrough returns true if the datapath must implement + // ARP passthrough for this endpoint + RequireARPPassthrough() bool + + // RequireEgressProg returns true if the endpoint requires an egress + // program attached to the InterfaceName() invoking the section + // "to-container" + RequireEgressProg() bool + + // RequireRouting returns true if the endpoint requires BPF routing to + // be enabled, when disabled, routing is delegated to Linux routing + RequireRouting() bool + + // RequireEndpointRoute returns true if the endpoint wishes to have a + // per endpoint route installed in the host's routing table to point to + // the endpoint's interface + RequireEndpointRoute() bool + + // GetPolicyVerdictLogFilter returns the PolicyVerdictLogFilter for the endpoint + GetPolicyVerdictLogFilter() uint32 + + // IsHost returns true if the endpoint is the host endpoint. 
+ IsHost() bool +} + +// EndpointConfiguration provides datapath implementations a clean interface +// to access endpoint-specific configuration when configuring the datapath. +type EndpointConfiguration interface { + CompileTimeConfiguration + LoadTimeConfiguration +} + +// ConfigWriter is anything which writes the configuration for various datapath +// program types. +type ConfigWriter interface { + // WriteNodeConfig writes the implementation-specific configuration of + // node-wide options into the specified writer. + WriteNodeConfig(io.Writer, *LocalNodeConfiguration) error + + // WriteNetdevConfig writes the implementation-specific configuration + // of configurable options to the specified writer. Options specified + // here will apply to base programs and not to endpoints, though + // endpoints may have equivalent configurable options. + WriteNetdevConfig(io.Writer, DeviceConfiguration) error + + // WriteTemplateConfig writes the implementation-specific configuration + // of configurable options for BPF templates to the specified writer. + WriteTemplateConfig(w io.Writer, cfg EndpointConfiguration) error + + // WriteEndpointConfig writes the implementation-specific configuration + // of configurable options for the endpoint to the specified writer. + WriteEndpointConfig(w io.Writer, cfg EndpointConfiguration) error +} + +// RemoteSNATDstAddrExclusionCIDRv4 returns a CIDR for SNAT exclusion. Any +// packet sent from a local endpoint to an IP address belonging to the CIDR +// should not be SNAT'd. +func RemoteSNATDstAddrExclusionCIDRv4() *cidr.CIDR { + if c := option.Config.GetIPv4NativeRoutingCIDR(); c != nil { + // ipv4-native-routing-cidr is set, so use it + return c + } + + return node.GetIPv4AllocRange() +} + +// RemoteSNATDstAddrExclusionCIDRv6 returns an IPv6 CIDR for SNAT exclusion. Any +// packet sent from a local endpoint to an IP address belonging to the CIDR +// should not be SNAT'd. +func RemoteSNATDstAddrExclusionCIDRv6() *cidr.CIDR { + if c := option.Config.GetIPv6NativeRoutingCIDR(); c != nil { + // ipv6-native-routing-cidr is set, so use it + return c + } + + return node.GetIPv6AllocRange() +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/datapath.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/datapath.go new file mode 100644 index 0000000000..f4b1745a47 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/datapath.go @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import "github.com/cilium/cilium/pkg/datapath/linux/bandwidth" + +// Datapath is the interface to abstract all datapath interactions. The +// abstraction allows implementing the datapath requirements with multiple +// implementations +type Datapath interface { + ConfigWriter + IptablesManager + + // Node must return the handler for node events + Node() NodeHandler + + NodeIDs() NodeIDHandler + + NodeNeighbors() NodeNeighbors + + // LocalNodeAddressing must return the node addressing implementation + // of the local node + LocalNodeAddressing() NodeAddressing + + // Loader must return the implementation of the loader, which is responsible + // for loading, reloading, and compiling datapath programs.
+ Loader() Loader + + // WireguardAgent returns the WireGuard agent for the local node + WireguardAgent() WireguardAgent + + // LBMap returns the load-balancer map + LBMap() LBMap + + Procfs() string + + BandwidthManager() bandwidth.Manager +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/endpoint.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/endpoint.go new file mode 100644 index 0000000000..df3bc01aa0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/endpoint.go @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import "github.com/sirupsen/logrus" + +// Endpoint provides access to endpoint configuration information that is necessary +// to compile and load the datapath. +type Endpoint interface { + EndpointConfiguration + InterfaceName() string + Logger(subsystem string) *logrus.Entry + StateDir() string +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/ipsec.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/ipsec.go new file mode 100644 index 0000000000..a62f7800a6 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/ipsec.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +type IPsecKeyCustodian interface { + AuthKeySize() int + SPI() uint8 + StartBackgroundJobs(NodeHandler) error +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/lbmap.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/lbmap.go new file mode 100644 index 0000000000..955876e8d8 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/lbmap.go @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import ( + "net" + "sort" + + "github.com/cilium/cilium/pkg/cidr" + "github.com/cilium/cilium/pkg/loadbalancer" +) + +// LBMap is the interface describing methods for manipulating service maps. +type LBMap interface { + UpsertService(*UpsertServiceParams) error + UpsertMaglevLookupTable(uint16, map[string]*loadbalancer.Backend, bool) error + IsMaglevLookupTableRecreated(bool) bool + DeleteService(loadbalancer.L3n4AddrID, int, bool, loadbalancer.SVCNatPolicy) error + AddBackend(*loadbalancer.Backend, bool) error + UpdateBackendWithState(*loadbalancer.Backend) error + DeleteBackendByID(loadbalancer.BackendID) error + AddAffinityMatch(uint16, loadbalancer.BackendID) error + DeleteAffinityMatch(uint16, loadbalancer.BackendID) error + UpdateSourceRanges(uint16, []*cidr.CIDR, []*cidr.CIDR, bool) error + DumpServiceMaps() ([]*loadbalancer.SVC, []error) + DumpBackendMaps() ([]*loadbalancer.Backend, error) + DumpAffinityMatches() (BackendIDByServiceIDSet, error) + DumpSourceRanges(bool) (SourceRangeSetByServiceID, error) + ExistsSockRevNat(cookie uint64, addr net.IP, port uint16) bool +} + +type UpsertServiceParams struct { + ID uint16 + IP net.IP + Port uint16 + + // PreferredBackends is a subset of ActiveBackends + // Note: this is only used in clustermesh with service affinity annotation.
+ PreferredBackends map[string]*loadbalancer.Backend + ActiveBackends map[string]*loadbalancer.Backend + NonActiveBackends []loadbalancer.BackendID + PrevBackendsCount int + IPv6 bool + Type loadbalancer.SVCType + NatPolicy loadbalancer.SVCNatPolicy + ExtLocal bool + IntLocal bool + Scope uint8 + SessionAffinity bool + SessionAffinityTimeoutSec uint32 + CheckSourceRange bool + UseMaglev bool + L7LBProxyPort uint16 // Non-zero for L7 LB services + Name loadbalancer.ServiceName // Fully qualified name of the service + LoopbackHostport bool +} + +// GetOrderedBackends returns an ordered list of backends with all the sorted +// preferred backend followed by active and non-active backends. +// Encapsulates logic to be also used in unit tests. +func (p *UpsertServiceParams) GetOrderedBackends() []loadbalancer.BackendID { + backendIDs := make([]loadbalancer.BackendID, 0, len(p.ActiveBackends)+len(p.NonActiveBackends)) + for _, b := range p.ActiveBackends { + backendIDs = append(backendIDs, b.ID) + } + + preferredMap := map[loadbalancer.BackendID]struct{}{} + for _, b := range p.PreferredBackends { + preferredMap[b.ID] = struct{}{} + } + + // Map iterations are non-deterministic so sort the backends by their IDs + // in order to maintain the same order before they are populated in BPF maps. + // This will minimize disruption to existing connections to the backends in the datapath. + sort.Slice(backendIDs, func(i, j int) bool { + // compare preferred flags of two backend IDs + _, firstPreferred := preferredMap[backendIDs[i]] + _, secondPreferred := preferredMap[backendIDs[j]] + + if firstPreferred && secondPreferred { + return backendIDs[i] < backendIDs[j] + } + + if firstPreferred { + return true + } + + if secondPreferred { + return false + } + + return backendIDs[i] < backendIDs[j] + }) + + // Add the non-active backends to the end of preferred/active backends list so that they are + // not considered while selecting backends to load-balance service traffic. + if len(p.NonActiveBackends) > 0 { + backendIDs = append(backendIDs, p.NonActiveBackends...) + } + + return backendIDs +} + +// BackendIDByServiceIDSet is the type of a set for checking whether a backend +// belongs to a given service +type BackendIDByServiceIDSet map[uint16]map[loadbalancer.BackendID]struct{} // svc ID => backend ID + +type SourceRangeSetByServiceID map[uint16][]*cidr.CIDR // svc ID => src range CIDRs diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/loader.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/loader.go new file mode 100644 index 0000000000..f5b5bb64d7 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/loader.go @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import ( + "context" + "io" + "net" + + "github.com/cilium/cilium/pkg/datapath/loader/metrics" + "github.com/cilium/cilium/pkg/datapath/tunnel" + "github.com/cilium/cilium/pkg/lock" +) + +// Loader is an interface to abstract out loading of datapath programs. 
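+
+// A minimal, hypothetical sketch of the ordering contract of GetOrderedBackends
+// above; the backend names and IDs are invented for illustration. Preferred
+// backends sort first by ID, then the remaining active backends by ID, with
+// non-active backends appended at the tail:
+//
+//	p := &UpsertServiceParams{
+//		ActiveBackends: map[string]*loadbalancer.Backend{
+//			"b1": {ID: 7}, // preferred
+//			"b2": {ID: 3},
+//			"b3": {ID: 5}, // preferred
+//		},
+//		PreferredBackends: map[string]*loadbalancer.Backend{
+//			"b1": {ID: 7},
+//			"b3": {ID: 5},
+//		},
+//		NonActiveBackends: []loadbalancer.BackendID{9},
+//	}
+//	ids := p.GetOrderedBackends() // [5 7 3 9]
+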
+type Loader interface { + CallsMapPath(id uint16) string + CustomCallsMapPath(id uint16) string + CompileAndLoad(ctx context.Context, ep Endpoint, stats *metrics.SpanStat) error + CompileOrLoad(ctx context.Context, ep Endpoint, stats *metrics.SpanStat) error + ReloadDatapath(ctx context.Context, ep Endpoint, stats *metrics.SpanStat) error + EndpointHash(cfg EndpointConfiguration) (string, error) + Unload(ep Endpoint) + Reinitialize(ctx context.Context, o BaseProgramOwner, tunnelConfig tunnel.Config, deviceMTU int, iptMgr IptablesManager, p Proxy) error + HostDatapathInitialized() <-chan struct{} +} + +// BaseProgramOwner is any type for which a loader is building base programs. +type BaseProgramOwner interface { + DeviceConfiguration + GetCompilationLock() *lock.RWMutex + Datapath() Datapath + LocalConfig() *LocalNodeConfiguration + SetPrefilter(pf PreFilter) +} + +// PreFilter an interface for an XDP pre-filter. +type PreFilter interface { + WriteConfig(fw io.Writer) + Dump(to []string) ([]string, int64) + Insert(revision int64, cidrs []net.IPNet) error + Delete(revision int64, cidrs []net.IPNet) error +} + +// Proxy is any type which installs rules related to redirecting traffic to +// a proxy. +type Proxy interface { + ReinstallRoutingRules() error + ReinstallIPTablesRules(ctx context.Context) error +} + +// IptablesManager manages iptables rules. +type IptablesManager interface { + // InstallProxyRules creates the necessary datapath config (e.g., iptables + // rules for redirecting host proxy traffic on a specific ProxyPort) + InstallProxyRules(ctx context.Context, proxyPort uint16, ingress, localOnly bool, name string) error + + // SupportsOriginalSourceAddr tells if the datapath supports + // use of original source addresses in proxy upstream + // connections. + SupportsOriginalSourceAddr() bool + InstallRules(ctx context.Context, ifName string, quiet, install bool) error + + // GetProxyPort fetches the existing proxy port configured for the + // specified listener. Used early in bootstrap to reopen proxy ports. + GetProxyPort(listener string) uint16 + + // InstallNoTrackRules is explicitly called when a pod has valid + // "policy.cilium.io/no-track-port" annotation. When + // InstallNoConntrackIptRules flag is set, a super set of v4 NOTRACK + // rules will be automatically installed upon agent bootstrap (via + // function addNoTrackPodTrafficRules) and this function will be + // skipped. When InstallNoConntrackIptRules is not set, this function + // will be executed to install NOTRACK rules. The rules installed by + // this function is very specific, for now, the only user is + // node-local-dns pods. + InstallNoTrackRules(IP string, port uint16, ipv6 bool) error + + // See comments for InstallNoTrackRules. 
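+//
+// A hedged usage sketch for this pair of calls; the pod IP, port, and manager
+// variable are invented for illustration:
+//
+//	var mgr IptablesManager // some datapath implementation
+//	// Pod annotated with "policy.cilium.io/no-track-port": skip conntrack
+//	// for its DNS port.
+//	if err := mgr.InstallNoTrackRules("10.0.0.53", 53, false); err != nil {
+//		// handle error
+//	}
+//	// On pod deletion, remove the rules again:
+//	_ = mgr.RemoveNoTrackRules("10.0.0.53", 53, false)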
+ RemoveNoTrackRules(IP string, port uint16, ipv6 bool) error +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/node.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/node.go new file mode 100644 index 0000000000..fcfcc2b7ba --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/node.go @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import ( + "context" + "net" + + "github.com/cilium/cilium/api/v1/models" + "github.com/cilium/cilium/pkg/cidr" + nodeTypes "github.com/cilium/cilium/pkg/node/types" +) + +type MTUConfiguration interface { + GetDeviceMTU() int + GetRouteMTU() int + GetRoutePostEncryptMTU() int +} + +// LocalNodeConfiguration represents the configuration of the local node +type LocalNodeConfiguration struct { + // MtuConfig is the MTU configuration of the node. + // + // This field is immutable at runtime. The value will not change in + // subsequent calls to NodeConfigurationChanged(). + MtuConfig MTUConfiguration + + // AuxiliaryPrefixes is the list of auxiliary prefixes that should be + // configured in addition to the node PodCIDR + // + // This field is mutable. The implementation of + // NodeConfigurationChanged() must adjust the routes accordingly. + AuxiliaryPrefixes []*cidr.CIDR + + // EnableIPv4 enables use of IPv4. Routing to the IPv4 allocation CIDR + // of other nodes must be enabled. + // + // This field is immutable at runtime. The value will not change in + // subsequent calls to NodeConfigurationChanged(). + EnableIPv4 bool + + // EnableIPv6 enables use of IPv6. Routing to the IPv6 allocation CIDR + // of other nodes must be enabled. + // + // This field is immutable at runtime. The value will not change in + // subsequent calls to NodeConfigurationChanged(). + EnableIPv6 bool + + // EnableEncapsulation enables use of encapsulation in communication + // between nodes. + // + // This field is immutable at runtime. The value will not change in + // subsequent calls to NodeConfigurationChanged(). + EnableEncapsulation bool + + // EnableAutoDirectRouting enables the use of direct routes for + // communication between nodes if two nodes have direct L2 + // connectivity. + // + // EnableAutoDirectRouting must be compatible with EnableEncapsulation + // and must provide a fallback to use encapsulation if direct routing + // is not feasible and encapsulation is enabled. + // + // This field is immutable at runtime. The value will not change in + // subsequent calls to NodeConfigurationChanged(). + EnableAutoDirectRouting bool + + // EnableLocalNodeRoute enables installation of the route which points + // the allocation prefix of the local node. Disabling this option is + // useful when another component is responsible for the routing of the + // allocation CIDR IPs into Cilium endpoints. + EnableLocalNodeRoute bool + + // EnableIPSec enables IPSec routes + EnableIPSec bool + + // EncryptNode enables encrypting NodeIP traffic requires EnableIPSec + EncryptNode bool + + // IPv4PodSubnets is a list of IPv4 subnets that pod IPs are assigned from + // these are then used when encryption is enabled to configure the node + // for encryption over these subnets at node initialization. + IPv4PodSubnets []*net.IPNet + + // IPv6PodSubnets is a list of IPv6 subnets that pod IPs are assigned from + // these are then used when encryption is enabled to configure the node + // for encryption over these subnets at node initialization. 
+ IPv6PodSubnets []*net.IPNet +} + +// NodeHandler handles node related events such as addition, update or deletion +// of nodes or changes to the local node configuration. +// +// Node events apply to the local node as well as to remote nodes. The +// implementation can differ between the own local node and remote nodes by +// calling node.IsLocal(). +type NodeHandler interface { + // Name identifies the handler, this is used in logging/reporting handler + // reconciliation errors. + Name() string + + // NodeAdd is called when a node is discovered for the first time. + NodeAdd(newNode nodeTypes.Node) error + + // NodeUpdate is called when a node definition changes. Both the old + // and new node definition is provided. NodeUpdate() is never called + // before NodeAdd() is called for a particular node. + NodeUpdate(oldNode, newNode nodeTypes.Node) error + + // NodeDelete is called after a node has been deleted + NodeDelete(node nodeTypes.Node) error + + // AllNodeValidateImplementation is called to validate the implementation + // of all nodes in the node cache. + AllNodeValidateImplementation() + + // NodeValidateImplementation is called to validate the implementation of + // the node in the datapath. This function is intended to be run on an + // interval to ensure that the datapath is consistently converged. + NodeValidateImplementation(node nodeTypes.Node) error + + // NodeConfigurationChanged is called when the local node configuration + // has changed + NodeConfigurationChanged(config LocalNodeConfiguration) error +} + +type NodeNeighbors interface { + // NodeNeighDiscoveryEnabled returns whether node neighbor discovery is enabled + NodeNeighDiscoveryEnabled() bool + + // NodeNeighborRefresh is called to refresh node neighbor table + NodeNeighborRefresh(ctx context.Context, node nodeTypes.Node) + + // NodeCleanNeighbors cleans all neighbor entries for the direct routing device + // and the encrypt interface. + NodeCleanNeighbors(migrateOnly bool) +} + +type NodeIDHandler interface { + // GetNodeIP returns the string node IP that was previously registered as the given node ID. + GetNodeIP(uint16) string + + // GetNodeID gets the node ID for the given node IP. If none is found, exists is false. + GetNodeID(nodeIP net.IP) (nodeID uint16, exists bool) + + // DumpNodeIDs returns all node IDs and their associated IP addresses. + DumpNodeIDs() []*models.NodeID + + // RestoreNodeIDs restores node IDs and their associated IP addresses from the + // BPF map and into the node handler in-memory copy. + RestoreNodeIDs() +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/node_addressing.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/node_addressing.go new file mode 100644 index 0000000000..4582958b31 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/node_addressing.go @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import ( + "net" + + "github.com/cilium/cilium/pkg/cidr" +) + +// NodeAddressingFamily is the node addressing information for a particular +// address family +type NodeAddressingFamily interface { + // Router is the address that will act as the router on each node where + // an agent is running on. Endpoints have a default route that points + // to this address. + Router() net.IP + + // PrimaryExternal is the primary external address of the node. Nodes + // must be able to reach each other via this address. 
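+//
+// Returning to the NodeHandler interface above, a minimal sketch of an
+// implementation that only logs node additions; the type name is invented:
+//
+//	type loggingNodeHandler struct{}
+//
+//	func (loggingNodeHandler) Name() string { return "logging-handler" }
+//	func (loggingNodeHandler) NodeAdd(n nodeTypes.Node) error {
+//		fmt.Printf("node added: %s\n", n.Name)
+//		return nil
+//	}
+//	func (loggingNodeHandler) NodeUpdate(oldN, newN nodeTypes.Node) error { return nil }
+//	func (loggingNodeHandler) NodeDelete(n nodeTypes.Node) error { return nil }
+//	func (loggingNodeHandler) AllNodeValidateImplementation() {}
+//	func (loggingNodeHandler) NodeValidateImplementation(n nodeTypes.Node) error { return nil }
+//	func (loggingNodeHandler) NodeConfigurationChanged(c LocalNodeConfiguration) error { return nil }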
+ PrimaryExternal() net.IP
+
+ // AllocationCIDR is the CIDR used for IP allocation of all endpoints
+ // on the node
+ AllocationCIDR() *cidr.CIDR
+
+ // LocalAddresses lists all local addresses
+ LocalAddresses() ([]net.IP, error)
+
+ // LoadBalancerNodeAddresses lists all addresses on which HostPort and
+ // NodePort services should be responded to
+ LoadBalancerNodeAddresses() []net.IP
+
+ // DirectRouting returns the interface index and IP address for
+ // direct routing.
+ DirectRouting() (int, net.IP, bool)
+}
+
+// NodeAddressing implements addressing of a node
+type NodeAddressing interface {
+ IPv6() NodeAddressingFamily
+ IPv4() NodeAddressingFamily
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/wireguard.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/wireguard.go
new file mode 100644
index 0000000000..46aa3ca2db
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/wireguard.go
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "net"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// WireguardAgent manages the WireGuard peers
+type WireguardAgent interface {
+ UpdatePeer(nodeName, pubKeyHex string, nodeIPv4, nodeIPv6 net.IP) error
+ DeletePeer(nodeName string) error
+ Status(includePeers bool) (*models.WireguardStatus, error)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/debug/subsystem.go b/vendor/github.com/cilium/cilium/pkg/debug/subsystem.go
new file mode 100644
index 0000000000..62162e3648
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/debug/subsystem.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package debug
+
+import (
+ "fmt"
+
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+// StatusFunc is a function returning the debug status of a subsystem. It is
+// passed into RegisterStatusFunc().
+type StatusFunc func() string
+
+// StatusMap is the collection of debug statuses of all subsystems. The key is
+// the subsystem name. The value is the subsystem debug status.
+type StatusMap map[string]string
+
+// StatusObject is the interface an object must implement to be able to be
+// passed into RegisterStatusObject().
+type StatusObject interface {
+ // DebugStatus() is the equivalent of StatusFunc. It must return the
+ // debug status as a string.
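+//
+// A hedged usage sketch of this registry (RegisterStatusFunc and
+// CollectSubsystemStatus are defined later in this file); the subsystem name
+// and status payload are invented:
+//
+//	_ = RegisterStatusFunc("my-subsystem", func() string {
+//		return "queue-depth=0"
+//	})
+//	status := CollectSubsystemStatus()
+//	// status["my-subsystem"] == "queue-depth=0"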
+ DebugStatus() string +} + +type functionMap map[string]StatusFunc + +type statusFunctions struct { + functions functionMap + mutex lock.RWMutex +} + +func newStatusFunctions() statusFunctions { + return statusFunctions{ + functions: functionMap{}, + } +} + +func (s *statusFunctions) register(name string, fn StatusFunc) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + if _, ok := s.functions[name]; ok { + return fmt.Errorf("subsystem already registered") + } + + s.functions[name] = fn + + return nil +} + +func (s *statusFunctions) registerStatusObject(name string, obj StatusObject) error { + return s.register(name, func() string { return obj.DebugStatus() }) +} + +func (s *statusFunctions) collectStatus() StatusMap { + fnCopy := functionMap{} + + // Make a copy to not hold the mutex while collecting the status + s.mutex.RLock() + for name, fn := range s.functions { + fnCopy[name] = fn + } + s.mutex.RUnlock() + + status := StatusMap{} + + for name, fn := range fnCopy { + status[name] = fn() + } + + return status +} + +var globalStatusFunctions = newStatusFunctions() + +// RegisterStatusFunc registers a subsystem and associates a status function to +// call for debug status collection +func RegisterStatusFunc(name string, fn StatusFunc) error { + return globalStatusFunctions.register(name, fn) +} + +// RegisterStatusObject registers a subsystem and associated a status object on +// which DebugStatus() is called to collect debug status +func RegisterStatusObject(name string, obj StatusObject) error { + return globalStatusFunctions.registerStatusObject(name, obj) +} + +// CollectSubsystemStatus collects the status of all subsystems and returns it +func CollectSubsystemStatus() StatusMap { + return globalStatusFunctions.collectStatus() +} diff --git a/vendor/github.com/cilium/cilium/pkg/ebpf/doc.go b/vendor/github.com/cilium/cilium/pkg/ebpf/doc.go new file mode 100644 index 0000000000..7a7cdb0cba --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/ebpf/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package ebpf provides functions that allow golang programs to interact with +// ebpf maps by wrapping the cilium/ebpf library. 
+// +groupName=pkg +package ebpf diff --git a/vendor/github.com/cilium/cilium/pkg/ebpf/ebpf.go b/vendor/github.com/cilium/cilium/pkg/ebpf/ebpf.go new file mode 100644 index 0000000000..630074060f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/ebpf/ebpf.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package ebpf + +import ( + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" +) + +var ( + log = logging.DefaultLogger.WithField(logfields.LogSubsys, "ebpf") +) diff --git a/vendor/github.com/cilium/cilium/pkg/ebpf/map.go b/vendor/github.com/cilium/cilium/pkg/ebpf/map.go new file mode 100644 index 0000000000..f5edec6bf6 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/ebpf/map.go @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package ebpf + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + ciliumebpf "github.com/cilium/ebpf" + + "github.com/cilium/cilium/api/v1/models" + "github.com/cilium/cilium/pkg/bpf" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/metrics" +) + +type MapSpec = ciliumebpf.MapSpec + +type PinType = ciliumebpf.PinType + +const ( + Hash = ciliumebpf.Hash + PerCPUHash = ciliumebpf.PerCPUHash + Array = ciliumebpf.Array + HashOfMaps = ciliumebpf.HashOfMaps + LPMTrie = ciliumebpf.LPMTrie + + PinNone = ciliumebpf.PinNone + PinByName = ciliumebpf.PinByName +) + +var ( + ErrKeyNotExist = ciliumebpf.ErrKeyNotExist +) + +// IterateCallback represents the signature of the callback function expected by +// the IterateWithCallback method, which in turn is used to iterate all the +// keys/values of a map. +type IterateCallback func(key, value interface{}) + +// Map represents an eBPF map. +type Map struct { + lock lock.RWMutex + *ciliumebpf.Map + + spec *MapSpec + path string +} + +// NewMap creates a new Map object. +func NewMap(spec *MapSpec) *Map { + return &Map{ + spec: spec, + } +} + +// LoadRegisterMap loads the specified map from a bpffs pin path and registers +// its handle in the package-global map register. +func LoadRegisterMap(mapName string) (*Map, error) { + path := bpf.MapPath(mapName) + + m, err := LoadPinnedMap(path) + if err != nil { + return nil, err + } + + registerMap(m) + + return m, nil +} + +// LoadPinnedMap wraps cilium/ebpf's LoadPinnedMap. +func LoadPinnedMap(fileName string) (*Map, error) { + m, err := ciliumebpf.LoadPinnedMap(fileName, nil) + if err != nil { + return nil, err + } + + return &Map{ + Map: m, + path: fileName, + }, nil +} + +func MapFromID(id int) (*Map, error) { + newMap, err := ciliumebpf.NewMapFromID(ciliumebpf.MapID(id)) + if err != nil { + return nil, err + } + + return &Map{ + Map: newMap, + }, nil +} + +// OpenOrCreate tries to open or create the eBPF map identified by the spec in +// the Map object. 
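+//
+// A hedged usage sketch; the map name, key/value sizes, and entry count are
+// invented for illustration:
+//
+//	m := NewMap(&MapSpec{
+//		Name:       "example_map",
+//		Type:       Hash,
+//		KeySize:    4,
+//		ValueSize:  8,
+//		MaxEntries: 1024,
+//		Pinning:    PinByName,
+//	})
+//	if err := m.OpenOrCreate(); err != nil {
+//		// handle error: bpffs not mounted, incompatible pinned map, etc.
+//	}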
+func (m *Map) OpenOrCreate() error { + m.lock.Lock() + defer m.lock.Unlock() + + if m.Map != nil { + return nil + } + + if m.spec == nil { + return fmt.Errorf("cannot create map: nil map spec") + } + + opts := ciliumebpf.MapOptions{ + PinPath: bpf.TCGlobalsPath(), + } + + m.spec.Flags |= bpf.GetPreAllocateMapFlags(m.spec.Type) + + path := bpf.MapPath(m.spec.Name) + + if m.spec.Pinning == ciliumebpf.PinByName { + mapDir := filepath.Dir(path) + + if _, err := os.Stat(mapDir); os.IsNotExist(err) { + if err = os.MkdirAll(mapDir, 0755); err != nil { + return &os.PathError{ + Op: "Unable create map base directory", + Path: path, + Err: err, + } + } + } + } + + newMap, err := ciliumebpf.NewMapWithOptions(m.spec, opts) + if err != nil { + if !errors.Is(err, ciliumebpf.ErrMapIncompatible) { + return fmt.Errorf("unable to create map: %w", err) + } + + // There already exists a pinned map but it has a different + // configuration (e.g different type, k/v size or flags). + // Try to delete and recreate it. + + log.WithField("map", m.spec.Name). + WithError(err).Warn("Removing map to allow for property upgrade (expect map data loss)") + + oldMap, err := ciliumebpf.LoadPinnedMap(path, &opts.LoadPinOptions) + if err != nil { + return fmt.Errorf("cannot load pinned map %s: %w", m.spec.Name, err) + } + defer func() { + if err := oldMap.Close(); err != nil { + log.WithField("map", m.spec.Name).Warnf("Cannot close map: %v", err) + } + }() + + if err = oldMap.Unpin(); err != nil { + return fmt.Errorf("cannot unpin map %s: %w", m.spec.Name, err) + } + + newMap, err = ciliumebpf.NewMapWithOptions(m.spec, opts) + if err != nil { + return fmt.Errorf("unable to create map: %w", err) + } + } + + m.Map = newMap + m.path = path + + registerMap(m) + metrics.UpdateMapCapacity(m.spec.Name, m.spec.MaxEntries) + return nil +} + +// IterateWithCallback iterates through all the keys/values of a map, passing +// each key/value pair to the cb callback. +func (m *Map) IterateWithCallback(key, value interface{}, cb IterateCallback) error { + if m.Map == nil { + if err := m.OpenOrCreate(); err != nil { + return err + } + } + + m.lock.RLock() + defer m.lock.RUnlock() + + entries := m.Iterate() + for entries.Next(key, value) { + cb(key, value) + } + + return nil +} + +// GetModel returns a BPF map in the representation served via the API. +func (m *Map) GetModel() *models.BPFMap { + m.lock.RLock() + defer m.lock.RUnlock() + + mapModel := &models.BPFMap{ + Path: m.path, + } + + // TODO: handle map cache. 
See pkg/bpf/map_linux.go:GetModel() + + return mapModel +} diff --git a/vendor/github.com/cilium/cilium/pkg/ebpf/map_register.go b/vendor/github.com/cilium/cilium/pkg/ebpf/map_register.go new file mode 100644 index 0000000000..61327b6eb2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/ebpf/map_register.go @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package ebpf + +import ( + "path" + + "github.com/cilium/cilium/api/v1/models" + "github.com/cilium/cilium/pkg/bpf" + "github.com/cilium/cilium/pkg/lock" +) + +var ( + mutex lock.RWMutex + mapRegister = map[string]*Map{} +) + +func registerMap(m *Map) { + mutex.Lock() + mapRegister[m.path] = m + mutex.Unlock() + + log.WithField("path", m.path).Debug("Registered BPF map") +} + +// GetMap returns the registered map with the given name or absolute path +func GetMap(name string) *Map { + mutex.RLock() + defer mutex.RUnlock() + + if !path.IsAbs(name) { + name = bpf.MapPath(name) + } + + return mapRegister[name] +} + +// GetOpenMaps returns a slice of all open BPF maps. This is identical to +// calling GetMap() on all open maps. +func GetOpenMaps() []*models.BPFMap { + // create a copy of mapRegister so we can unlock the mutex again as + // locking Map.lock inside of the mutex is not permitted + mutex.RLock() + maps := make([]*Map, 0, len(mapRegister)) + for _, m := range mapRegister { + maps = append(maps, m) + } + mutex.RUnlock() + + mapList := make([]*models.BPFMap, len(maps)) + for i, m := range maps { + mapList[i] = m.GetModel() + } + + return mapList +} diff --git a/vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/owner.go b/vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/owner.go new file mode 100644 index 0000000000..57a6a75cc1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/owner.go @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package regeneration + +import ( + "context" + + datapath "github.com/cilium/cilium/pkg/datapath/types" + "github.com/cilium/cilium/pkg/fqdn/restore" + "github.com/cilium/cilium/pkg/identity" + "github.com/cilium/cilium/pkg/lock" + monitorAPI "github.com/cilium/cilium/pkg/monitor/api" + "github.com/cilium/cilium/pkg/proxy/accesslog" +) + +// Owner is the interface defines the requirements for anybody owning policies. +type Owner interface { + // QueueEndpointBuild puts the given endpoint in the processing queue + QueueEndpointBuild(ctx context.Context, epID uint64) (func(), error) + + // GetCompilationLock returns the mutex responsible for synchronizing compilation + // of BPF programs. + GetCompilationLock() *lock.RWMutex + + // SendNotification is called to emit an agent notification + SendNotification(msg monitorAPI.AgentNotifyMessage) error + + // Datapath returns a reference to the datapath implementation. + Datapath() datapath.Datapath + + // GetDNSRules creates a fresh copy of DNS rules that can be used when + // endpoint is restored on a restart. + // The endpoint lock must not be held while calling this function. + GetDNSRules(epID uint16) restore.DNSRules + + // RemoveRestoredDNSRules removes any restored DNS rules for + // this endpoint from the DNS proxy. + RemoveRestoredDNSRules(epID uint16) +} + +// EndpointInfoSource returns information about an endpoint being proxied. +// The read lock must be held when calling any method. 
+type EndpointInfoSource interface { + GetID() uint64 + GetIPv4Address() string + GetIPv6Address() string + GetIdentity() identity.NumericIdentity + GetLabels() []string + HasSidecarProxy() bool + ConntrackName() string + ConntrackNameLocked() string +} + +// EndpointUpdater returns information about an endpoint being proxied and +// is called back to update the endpoint when proxy events occur. +// This is a subset of `Endpoint`. +type EndpointUpdater interface { + EndpointInfoSource + // OnProxyPolicyUpdate is called when the proxy acknowledges that it + // has applied a policy. + OnProxyPolicyUpdate(policyRevision uint64) + + // UpdateProxyStatistics updates the Endpoint's proxy statistics to account + // for a new observed flow with the given characteristics. + UpdateProxyStatistics(proxyType, l4Protocol string, port uint16, ingress, request bool, verdict accesslog.FlowVerdict) +} diff --git a/vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/regeneration_context.go b/vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/regeneration_context.go new file mode 100644 index 0000000000..84cdc63de4 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/regeneration_context.go @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package regeneration + +import ( + "context" +) + +// DatapathRegenerationLevel determines what is expected of the datapath when +// a regeneration event is processed. +type DatapathRegenerationLevel int + +const ( + // Invalid is the default level to enforce explicit setting of + // the regeneration level. + Invalid DatapathRegenerationLevel = iota + // RegenerateWithoutDatapath indicates that datapath rebuild or reload + // is not required to implement this regeneration. + RegenerateWithoutDatapath + // RegenerateWithDatapathLoad indicates that the datapath must be + // reloaded but not recompiled to implement this regeneration. + RegenerateWithDatapathLoad + // RegenerateWithDatapathRewrite indicates that the datapath must be + // recompiled and reloaded to implement this regeneration. + RegenerateWithDatapathRewrite + // RegenerateWithDatapathRebuild indicates that the datapath must be + // fully recompiled and reloaded without using any cached templates. + RegenerateWithDatapathRebuild +) + +// String converts a DatapathRegenerationLevel into a human-readable string. +func (r DatapathRegenerationLevel) String() string { + switch r { + case Invalid: + return "invalid" + case RegenerateWithoutDatapath: + return "no-rebuild" + case RegenerateWithDatapathLoad: + return "reload" + case RegenerateWithDatapathRewrite: + return "rewrite+load" + case RegenerateWithDatapathRebuild: + return "compile+load" + default: + break + } + return "BUG: Unknown DatapathRegenerationLevel" +} + +// ExternalRegenerationMetadata contains any information about a regeneration that +// the endpoint subsystem should be made aware of for a given endpoint. +type ExternalRegenerationMetadata struct { + // Reason provides context to source for the regeneration, which is + // used to generate useful log messages. + Reason string + + // RegenerationLevel forces datapath regeneration according to the + // levels defined in the DatapathRegenerationLevel description. 
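+//
+// A hedged sketch of requesting a reload-only regeneration; the reason string
+// is invented for illustration:
+//
+//	meta := &ExternalRegenerationMetadata{
+//		Reason:            "example: policy updated",
+//		RegenerationLevel: RegenerateWithDatapathLoad,
+//		ParentContext:     context.Background(),
+//	}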
+ RegenerationLevel DatapathRegenerationLevel + + ParentContext context.Context +} diff --git a/vendor/github.com/cilium/cilium/pkg/eventqueue/doc.go b/vendor/github.com/cilium/cilium/pkg/eventqueue/doc.go new file mode 100644 index 0000000000..62b82c19d1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/eventqueue/doc.go @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package eventqueue implements a queue-based system for event processing in a +// generic fashion in a first-in, first-out manner. +package eventqueue diff --git a/vendor/github.com/cilium/cilium/pkg/eventqueue/eventqueue.go b/vendor/github.com/cilium/cilium/pkg/eventqueue/eventqueue.go new file mode 100644 index 0000000000..e4bd753c7b --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/eventqueue/eventqueue.go @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package eventqueue + +import ( + "fmt" + "reflect" + "sync" + "sync/atomic" + + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/spanstat" +) + +var ( + log = logging.DefaultLogger.WithField(logfields.LogSubsys, "eventqueue") +) + +// EventQueue is a structure which is utilized to handle Events in a first-in, +// first-out order. An EventQueue may be closed, in which case all events which +// are queued up, but have not been processed yet, will be cancelled (i.e., not +// ran). It is guaranteed that no events will be scheduled onto an EventQueue +// after it has been closed; if any event is attempted to be scheduled onto an +// EventQueue after it has been closed, it will be cancelled immediately. For +// any event to be processed by the EventQueue, it must implement the +// `EventHandler` interface. This allows for different types of events to be +// processed by anything which chooses to utilize an `EventQueue`. +type EventQueue struct { + // events represents the queue of events. This should always be a buffered + // channel. + events chan *Event + + // close is closed once the EventQueue has been closed. + close chan struct{} + + // drain is closed when the EventQueue is stopped. Any Event which is + // Enqueued after this channel is closed will be cancelled / not processed + // by the queue. If an Event has been Enqueued, but has not been processed + // before this channel is closed, it will be cancelled and not processed + // as well. + drain chan struct{} + + // eventQueueOnce is used to ensure that the EventQueue business logic can + // only be ran once. + eventQueueOnce sync.Once + + // closeOnce is used to ensure that the EventQueue can only be closed once. + closeOnce sync.Once + + // name is used to differentiate this EventQueue from other EventQueues that + // are also running in logs + name string + + eventsMu lock.RWMutex + + // eventsClosed is a channel that's closed when the event loop (Run()) + // terminates. + eventsClosed chan struct{} +} + +// NewEventQueue returns an EventQueue with a capacity for only one event at +// a time. +func NewEventQueue() *EventQueue { + return NewEventQueueBuffered("", 1) +} + +// NewEventQueueBuffered returns an EventQueue with a capacity of, +// numBufferedEvents at a time, and all other needed fields initialized. 
+func NewEventQueueBuffered(name string, numBufferedEvents int) *EventQueue { + log.WithFields(logrus.Fields{ + "name": name, + "numBufferedEvents": numBufferedEvents, + }).Debug("creating new EventQueue") + return &EventQueue{ + name: name, + // Up to numBufferedEvents can be Enqueued until Enqueueing blocks. + events: make(chan *Event, numBufferedEvents), + close: make(chan struct{}), + drain: make(chan struct{}), + eventsClosed: make(chan struct{}), + } +} + +// Enqueue pushes the given event onto the EventQueue. If the queue has been +// stopped, the Event will not be enqueued, and its cancel channel will be +// closed, indicating that the Event was not ran. This function may block if +// the queue is at its capacity for events. If a single Event has Enqueue +// called on it multiple times asynchronously, there is no guarantee as to +// which one will return the channel which passes results back to the caller. +// It is up to the caller to check whether the returned channel is nil, as +// waiting to receive on such a channel will block forever. Returns an error +// if the Event has been previously enqueued, if the Event is nil, or the queue +// itself is not initialized properly. +func (q *EventQueue) Enqueue(ev *Event) (<-chan interface{}, error) { + if q.notSafeToAccess() || ev == nil { + return nil, fmt.Errorf("unable to Enqueue event") + } + + // Events can only be enqueued once. + if !ev.enqueued.CompareAndSwap(false, true) { + return nil, fmt.Errorf("unable to Enqueue event; event has already had Enqueue called on it") + } + + // Multiple Enqueues can occur at the same time. Ensure that events channel + // is not closed while we are enqueueing events. + q.eventsMu.RLock() + defer q.eventsMu.RUnlock() + + select { + // The event should be drained from the queue (e.g., it should not be + // processed). + case <-q.drain: + // Closed eventResults channel signifies cancellation. + close(ev.cancelled) + close(ev.eventResults) + + return ev.eventResults, nil + default: + // The events channel may be closed even if an event has been pushed + // onto the events channel, as events are consumed off of the events + // channel asynchronously! If the EventQueue is closed before this + // event is processed, then it will be cancelled. + + ev.stats.waitEnqueue.Start() + ev.stats.waitConsumeOffQueue.Start() + q.events <- ev + ev.stats.waitEnqueue.End(true) + return ev.eventResults, nil + } +} + +// Event is an event that can be enqueued onto an EventQueue. +type Event struct { + // Metadata is the information about the event which is sent + // by its queuer. Metadata must implement the EventHandler interface in + // order for the Event to be successfully processed by the EventQueue. + Metadata EventHandler + + // eventResults is a channel on which the results of the event are sent. + // It is populated by the EventQueue itself, not by the queuer. This channel + // is closed if the event is cancelled. + eventResults chan interface{} + + // cancelled signals that the given Event was not ran. This can happen + // if the EventQueue processing this Event was closed before the Event was + // Enqueued onto the Event queue, or if the Event was Enqueued onto an + // EventQueue, and the EventQueue on which the Event was scheduled was + // closed. + cancelled chan struct{} + + // stats is a field which contains information about when this event is + // enqueued, dequeued, etc. + stats eventStatistics + + // enqueued specifies whether this event has been enqueued on an EventQueue. 
+ enqueued atomic.Bool +} + +type eventStatistics struct { + + // waitEnqueue shows how long a given event was waiting on the queue before + // it was actually processed. + waitEnqueue spanstat.SpanStat + + // durationStat shows how long the actual processing of the event took. This + // is the time for how long Handle() takes for the event. + durationStat spanstat.SpanStat + + // waitConsumeOffQueue shows how long it took for the event to be consumed + // plus the time it the event waited in the queue. + waitConsumeOffQueue spanstat.SpanStat +} + +// NewEvent returns an Event with all fields initialized. +func NewEvent(meta EventHandler) *Event { + return &Event{ + Metadata: meta, + eventResults: make(chan interface{}, 1), + cancelled: make(chan struct{}), + stats: eventStatistics{}, + } +} + +// WasCancelled returns whether the cancelled channel for the given Event has +// been closed or not. Cancellation occurs if the event was not processed yet +// by an EventQueue onto which this Event was Enqueued, and the queue is closed, +// or if the event was attempted to be scheduled onto an EventQueue which has +// already been closed. +func (ev *Event) WasCancelled() bool { + select { + case <-ev.cancelled: + return true + default: + return false + } +} + +func (ev *Event) printStats(q *EventQueue) { + if option.Config.Debug { + q.getLogger().WithFields(logrus.Fields{ + "eventType": reflect.TypeOf(ev.Metadata).String(), + "eventHandlingDuration": ev.stats.durationStat.Total(), + "eventEnqueueWaitTime": ev.stats.waitEnqueue.Total(), + "eventConsumeOffQueueWaitTime": ev.stats.waitConsumeOffQueue.Total(), + }).Debug("EventQueue event processing statistics") + } +} + +// Run consumes events that have been queued for this EventQueue. It +// is presumed that the eventQueue is a buffered channel with a length of one +// (i.e., only one event can be processed at a time). All business logic for +// handling queued events is contained within this function. The events in the +// queue must implement the EventHandler interface. If the event queue is +// closed, then all events which were queued up, but not processed, are +// cancelled; any event which is currently being processed will not be +// cancelled. +func (q *EventQueue) Run() { + if q.notSafeToAccess() { + return + } + + go q.run() +} + +func (q *EventQueue) run() { + q.eventQueueOnce.Do(func() { + defer close(q.eventsClosed) + for ev := range q.events { + select { + case <-q.drain: + ev.stats.waitConsumeOffQueue.End(false) + close(ev.cancelled) + close(ev.eventResults) + ev.printStats(q) + default: + ev.stats.waitConsumeOffQueue.End(true) + ev.stats.durationStat.Start() + ev.Metadata.Handle(ev.eventResults) + // Always indicate success for now. + ev.stats.durationStat.End(true) + // Ensures that no more results can be sent as the event has + // already been processed. + ev.printStats(q) + close(ev.eventResults) + } + } + }) +} + +func (q *EventQueue) notSafeToAccess() bool { + return q == nil || q.close == nil || q.drain == nil || q.events == nil +} + +// Stop stops any further events from being processed by the EventQueue. Any +// event which is currently being processed by the EventQueue will continue to +// run. All other events waiting to be processed, and all events that may be +// enqueued will not be processed by the event queue; they will be cancelled. +// If the queue has already been stopped, this is a no-op. 
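+//
+// A hedged end-to-end sketch of the queue lifecycle; the handler type is
+// invented (EventHandler is defined at the bottom of this file):
+//
+//	type printEvent struct{}
+//
+//	func (printEvent) Handle(res chan interface{}) {
+//		res <- "done"
+//	}
+//
+//	q := NewEventQueueBuffered("example", 8)
+//	q.Run()
+//	resCh, err := q.Enqueue(NewEvent(printEvent{}))
+//	if err == nil && resCh != nil {
+//		<-resCh // result, or a closed channel if the event was cancelled
+//	}
+//	q.Stop()
+//	q.WaitToBeDrained()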
+func (q *EventQueue) Stop() {
+ if q.notSafeToAccess() {
+ return
+ }
+
+ q.closeOnce.Do(func() {
+ q.getLogger().Debug("stopping EventQueue")
+ // Any event that is sent to the queue at this point will be cancelled
+ // immediately in Enqueue().
+ close(q.drain)
+
+ // Signal that the queue has been drained.
+ close(q.close)
+
+ q.eventsMu.Lock()
+ close(q.events)
+ q.eventsMu.Unlock()
+ })
+}
+
+// WaitToBeDrained blocks until the EventQueue has been stopped and drained.
+// This allows queuers to ensure that all events in the queue have been
+// processed or cancelled. If the queue is nil, it returns immediately.
+func (q *EventQueue) WaitToBeDrained() {
+ if q == nil {
+ return
+ }
+ <-q.close
+
+ // If the queue is running, then in-flight events may still be ongoing.
+ // Wait for them to be completed for the queue to be fully drained. If the
+ // queue is not running, we must forcefully run it because nothing else
+ // will so that it can be drained.
+ go q.run()
+ <-q.eventsClosed
+}
+
+func (q *EventQueue) getLogger() *logrus.Entry {
+ return log.WithFields(
+ logrus.Fields{
+ "name": q.name,
+ })
+}
+
+// EventHandler is an interface for allowing an EventQueue to handle events
+// in a generic way. To be processed by the EventQueue, all event types must
+// implement any function specified in this interface.
+type EventHandler interface {
+ Handle(chan interface{})
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/fqdn/restore/restore.go b/vendor/github.com/cilium/cilium/pkg/fqdn/restore/restore.go
new file mode 100644
index 0000000000..769d3adb51
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/fqdn/restore/restore.go
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// The restore package provides data structures important to restoring
+// DNS proxy rules. This package serves as a central source for these
+// structures.
+// Note that these are marshaled as JSON and any changes need to be compatible
+// across an upgrade!
+package restore

+import (
+ "sort"
+)
+
+// DNSRules contains IP-based DNS rules for a set of ports (e.g., 53)
+type DNSRules map[uint16]IPRules
+
+// IPRules is an unsorted collection of IPRules
+type IPRules []IPRule
+
+// IPRule stores the allowed destination IPs for DNS names matching a regex
+type IPRule struct {
+ Re RuleRegex
+ IPs map[string]struct{} // IPs, nil set is wildcard and allows all IPs!
+}
+
+// RuleRegex is a wrapper for a pointer to a string so that we can define marshalers for it.
+type RuleRegex struct {
+ Pattern *string
+}
+
+// Sort is only used for testing.
+// It sorts in place, but returns IPRules for convenience.
+func (r IPRules) Sort() IPRules {
+ sort.SliceStable(r, func(i, j int) bool {
+ if r[i].Re.Pattern != nil && r[j].Re.Pattern != nil {
+ return *r[i].Re.Pattern < *r[j].Re.Pattern
+ }
+ if r[i].Re.Pattern != nil {
+ return true
+ }
+ return false
+ })
+
+ return r
+}
+
+// Sort is only used for testing.
+// It sorts in place, but returns DNSRules for convenience.
+func (r DNSRules) Sort() DNSRules {
+ for port, ipRules := range r {
+ if len(ipRules) > 0 {
+ ipRules = ipRules.Sort()
+ r[port] = ipRules
+ }
+ }
+ return r
+}
+
+// UnmarshalText unmarshals JSON into a RuleRegex.
+// This must have a pointer receiver, otherwise the RuleRegex remains empty.
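+//
+// A hedged sketch of the JSON round-trip these marshalers enable; the pattern,
+// port, and IP below are invented:
+//
+//	pat := "^example[.]com[.]$"
+//	rules := DNSRules{
+//		53: IPRules{{Re: RuleRegex{Pattern: &pat}, IPs: map[string]struct{}{"10.0.0.1": {}}}},
+//	}
+//	b, _ := json.Marshal(rules) // RuleRegex serializes as a plain string
+//	var restored DNSRules
+//	_ = json.Unmarshal(b, &restored)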
+func (r *RuleRegex) UnmarshalText(b []byte) error {
+ pattern := string(b)
+ r.Pattern = &pattern
+ return nil
+}
+
+// MarshalText marshals RuleRegex as a string
+func (r RuleRegex) MarshalText() ([]byte, error) {
+ if r.Pattern != nil {
+ return []byte(*r.Pattern), nil
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/lifecycle.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/lifecycle.go
index 0dcfffae1a..95be1b15b8 100644
--- a/vendor/github.com/cilium/cilium/pkg/hive/cell/lifecycle.go
+++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/lifecycle.go
@@ -199,6 +199,9 @@ func getHookFuncName(hook HookInterface, start bool) (name string, hasHook bool)
 // and the type params would be missing, so instead we'll just use the
 // type name + method name.
 switch hook := hook.(type) {
+ case augmentedHook:
+ name, hasHook = getHookFuncName(hook.HookInterface, start)
+ return
 case Hook:
 if start {
 if hook.OnStart == nil {
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/job/job.go b/vendor/github.com/cilium/cilium/pkg/hive/job/job.go
new file mode 100644
index 0000000000..0bdc5900f4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/job/job.go
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package job
+
+import (
+ "context"
+ "errors"
+ "runtime/pprof"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ "k8s.io/client-go/util/workqueue"
+
+ "github.com/cilium/cilium/pkg/hive"
+ "github.com/cilium/cilium/pkg/hive/cell"
+ "github.com/cilium/cilium/pkg/hive/internal"
+ "github.com/cilium/cilium/pkg/inctimer"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/spanstat"
+ "github.com/cilium/cilium/pkg/stream"
+)
+
+// Cell provides job.Registry which constructs job.Group-s. Job groups automate a lot of the logic involved with
+// lifecycle management of goroutines within a Hive Cell. Providing a context that is canceled on shutdown and making
+// sure multiple goroutines properly shut down takes a lot of boilerplate. Job groups make it easy to queue, spawn, and
+// collect jobs with minimal boilerplate. The registry maintains references to all groups which will allow us to add
+// automatic metrics collection and/or status reporting in the future.
+var Cell = cell.Module(
+ "jobs",
+ "Jobs",
+ cell.Provide(newRegistry),
+ cell.Metric(newJobMetrics),
+)
+
+// A Registry creates Groups; it maintains references to these groups so that centralized information, such as
+// metrics, can be collected across all of them.
+type Registry interface {
+ // NewGroup creates a new group of jobs which can be started and stopped together as part of the cell's lifecycle.
+ // The provided scope is used to report the health status of the jobs. A `cell.Scope` can be obtained via injection;
+ // an object with the correct scope is provided by the closest `cell.Module`.
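+//
+// A hedged sketch of typical wiring inside a cell; the module name and job
+// below are invented. Jobs added before Start are queued until the group's
+// Start hook runs:
+//
+//	cell.Module("example", "Example",
+//		cell.Invoke(func(reg Registry, scope cell.Scope, lc cell.Lifecycle) {
+//			g := reg.NewGroup(scope)
+//			g.Add(OneShot("hello", func(ctx context.Context, health cell.HealthReporter) error {
+//				return nil
+//			}))
+//			lc.Append(g)
+//		}),
+//	)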
+ NewGroup(scope cell.Scope, opts ...groupOpt) Group
+}
+
+type registry struct {
+ logger logrus.FieldLogger
+ shutdowner hive.Shutdowner
+
+ metrics *jobMetrics
+
+ mu lock.Mutex
+ groups []Group
+}
+
+func newRegistry(
+ logger logrus.FieldLogger,
+ shutdowner hive.Shutdowner,
+ metrics *jobMetrics,
+) Registry {
+ return &registry{
+ logger: logger,
+ shutdowner: shutdowner,
+ metrics: metrics,
+ }
+}
+
+// NewGroup creates a new Group with the given `opts` options, which allows you to customize the behavior for the
+// group as a whole, for example by allowing you to add pprof labels to the group or by customizing the logger.
+func (c *registry) NewGroup(scope cell.Scope, opts ...groupOpt) Group {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ var options options
+ options.logger = c.logger
+ options.shutdowner = c.shutdowner
+ options.metrics = c.metrics
+
+ for _, opt := range opts {
+ opt(&options)
+ }
+
+ g := &group{
+ options: options,
+ wg: &sync.WaitGroup{},
+ scope: scope,
+ }
+
+ c.groups = append(c.groups, g)
+
+ return g
+}
+
+// Group aims to streamline the management of work within a cell. Group implements cell.HookInterface and takes care
+// of proper start and stop behavior as expected by hive. A group allows you to add multiple types of jobs which
+// implement different kinds of logic. No matter the job type, the function provided to it is always called with a
+// context which is bound to the lifecycle of the cell.
+type Group interface {
+ Add(...Job)
+ // Scoped creates a scoped group; jobs added to this scoped group will appear as a sub-scope in the health reporter
+ Scoped(name string) ScopedGroup
+ cell.HookInterface
+}
+
+// Job is an interface that describes a unit of work which can be added to a Group. This interface contains unexported
+// methods and thus can only be implemented by functions in this package such as OneShot, Timer, or Observer.
+type Job interface {
+ start(ctx context.Context, wg *sync.WaitGroup, scope cell.Scope, options options)
+}
+
+type group struct {
+ options options
+
+ wg *sync.WaitGroup
+
+ mu lock.Mutex
+ ctx context.Context
+ cancel context.CancelFunc
+ queuedJobs []Job
+
+ scope cell.Scope
+}
+
+type options struct {
+ pprofLabels pprof.LabelSet
+ logger logrus.FieldLogger
+ shutdowner hive.Shutdowner
+ metrics *jobMetrics
+}
+
+type groupOpt func(o *options)
+
+// WithLogger replaces the default logger with the given logger, useful if you want to add certain fields to the logs
+// created by the group/jobs.
+func WithLogger(logger logrus.FieldLogger) groupOpt {
+ return func(o *options) {
+ o.logger = logger
+ }
+}
+
+// WithPprofLabels adds pprof labels which will be added to the goroutines spawned for the jobs and thus included in
+// the pprof profiles.
+func WithPprofLabels(pprofLabels pprof.LabelSet) groupOpt {
+ return func(o *options) {
+ o.pprofLabels = pprofLabels
+ }
+}
+
+var _ cell.HookInterface = (*group)(nil)
+
+// Start implements the cell.HookInterface interface
+func (jg *group) Start(_ cell.HookContext) error {
+ jg.mu.Lock()
+ defer jg.mu.Unlock()
+
+ jg.ctx, jg.cancel = context.WithCancel(context.Background())
+
+ jg.wg.Add(len(jg.queuedJobs))
+ for _, job := range jg.queuedJobs {
+ pprof.Do(jg.ctx, jg.options.pprofLabels, func(ctx context.Context) {
+ go job.start(ctx, jg.wg, jg.scope, jg.options)
+ })
+ }
+ // Nil the queue once we start so it can be GC'ed
+ jg.queuedJobs = nil
+
+ return nil
+}
+
+// Stop implements the cell.HookInterface interface
+func (jg *group) Stop(stopCtx cell.HookContext) error {
+ jg.mu.Lock()
+ defer jg.mu.Unlock()
+
+ done := make(chan struct{})
+ go func() {
+ jg.wg.Wait()
+ close(done)
+ }()
+
+ jg.cancel()
+
+ select {
+ case <-stopCtx.Done():
+ jg.options.logger.Error("Stop hook context expired before job group was done")
+ case <-done:
+ }
+
+ return nil
+}
+
+func (jg *group) Add(jobs ...Job) {
+ jg.add(jg.scope, jobs...)
+}
+
+func (jg *group) add(scope cell.Scope, jobs ...Job) {
+ jg.mu.Lock()
+ defer jg.mu.Unlock()
+
+ // The context is only set once the group has been started. If we have not yet started, queue the jobs.
+ if jg.ctx == nil {
+ jg.queuedJobs = append(jg.queuedJobs, jobs...)
+ return
+ }
+
+ for _, j := range jobs {
+ jg.wg.Add(1)
+ pprof.Do(jg.ctx, jg.options.pprofLabels, func(ctx context.Context) {
+ go j.start(ctx, jg.wg, scope, jg.options)
+ })
+ }
+}
+
+// Scoped creates a scoped group; jobs added to this scoped group will appear as a sub-scope in the health reporter
+func (jg *group) Scoped(name string) ScopedGroup {
+ return &scopedGroup{
+ group: jg,
+ scope: cell.GetSubScope(jg.scope, name),
+ }
+}
+
+type ScopedGroup interface {
+ Add(jobs ...Job)
+}
+
+type scopedGroup struct {
+ group *group
+ scope cell.Scope
+}
+
+func (sg *scopedGroup) Add(jobs ...Job) {
+ sg.group.add(sg.scope, jobs...)
+}
+
+// OneShot creates a "one shot" job which can be added to a Group. The function passed to a one shot job is invoked
+// once at startup. It can live for the entire lifetime of the group or exit early depending on its task.
+// If it returns an error, it can optionally be retried if the WithRetry option is set. If retries are not configured or
+// all retries failed as well, a shutdown of the hive can be triggered by specifying the WithShutdown option.
+//
+// The given function is expected to exit as soon as the context given to it expires, this is especially important for
+// blocking or long running jobs.
+func OneShot(name string, fn OneShotFunc, opts ...jobOneShotOpt) Job {
+ if fn == nil {
+ panic("`fn` must not be nil")
+ }
+
+ job := &jobOneShot{
+ name: name,
+ fn: fn,
+ opts: opts,
+ }
+
+ return job
+}
+
+type jobOneShotOpt func(*jobOneShot)
+
+// WithRetry option configures a one shot job to retry `times` amount of times. On each retry attempt the `backoff`
+// rate limiter is consulted to check how long the job should wait before making another attempt.
+func WithRetry(times int, backoff workqueue.RateLimiter) jobOneShotOpt {
+ return func(jos *jobOneShot) {
+ jos.retry = times
+ jos.backoff = backoff
+ }
+}
+
+// WithShutdown option configures a one shot job to shut down the whole hive if the job returns an error. If the
+// WithRetry option is also configured, all retries must be exhausted before we trigger the shutdown.
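+//
+// A hedged sketch combining these options; the job name, retry budget, and
+// helper below are invented:
+//
+//	g.Add(OneShot("initial-sync",
+//		func(ctx context.Context, health cell.HealthReporter) error {
+//			return doInitialSync(ctx) // hypothetical helper
+//		},
+//		WithRetry(3, workqueue.DefaultControllerRateLimiter()),
+//		WithShutdown(), // stop the hive if all attempts fail
+//	))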
+func WithShutdown() jobOneShotOpt { + return func(jos *jobOneShot) { + jos.shutdownOnError = true + } +} + +// WithMetrics option enabled metrics collection for this one shot job. This option should only be used +// for short running jobs. Metrics use the jobs name as label, so if jobs are spawned dynamically +// make sure to use the same job name to keep metric cardinality low. +func WithMetrics() jobOneShotOpt { + return func(jos *jobOneShot) { + jos.metrics = true + } +} + +// OneShotFunc is the function type which is invoked by a one shot job. The given function is expected to exit as soon +// as the context given to it expires, this is especially important for blocking or long running jobs. +type OneShotFunc func(ctx context.Context, health cell.HealthReporter) error + +type jobOneShot struct { + name string + fn OneShotFunc + opts []jobOneShotOpt + + health cell.HealthReporter + + // If retry > 0, retry on error x times. + retry int + backoff workqueue.RateLimiter + shutdownOnError bool + metrics bool +} + +func (jos *jobOneShot) start(ctx context.Context, wg *sync.WaitGroup, scope cell.Scope, options options) { + defer wg.Done() + + for _, opt := range jos.opts { + opt(jos) + } + + jos.health = cell.GetHealthReporter(scope, "job-"+jos.name) + defer jos.health.Stopped("one-shot job done") + + l := options.logger.WithFields(logrus.Fields{ + "name": jos.name, + "func": internal.FuncNameAndLocation(jos.fn), + }) + + stat := &spanstat.SpanStat{} + + timer, cancel := inctimer.New() + defer cancel() + + var err error + for i := 0; i <= jos.retry; i++ { + var timeout time.Duration + if i != 0 { + timeout = jos.backoff.When(jos) + l.WithFields(logrus.Fields{ + "backoff": timeout, + "retry-count": i, + }).Debug("Delaying retry attempt") + } + + select { + case <-ctx.Done(): + return + case <-timer.After(timeout): + } + + l.Debug("Starting one-shot job") + + if jos.metrics { + stat.Start() + } + + jos.health.OK("Running") + err = jos.fn(ctx, jos.health) + + if jos.metrics { + sec := stat.End(true).Seconds() + options.metrics.OneShotRunDuration.WithLabelValues(jos.name).Observe(sec) + stat.Reset() + } + + if err == nil { + return + } else if !errors.Is(err, context.Canceled) { + jos.health.Degraded("one-shot job errored", err) + l.WithError(err).Error("one-shot job errored") + options.metrics.JobErrorsTotal.WithLabelValues(jos.name).Inc() + } + } + + if options.shutdowner != nil && jos.shutdownOnError { + options.shutdowner.Shutdown(hive.ShutdownWithError(err)) + } +} + +// Timer creates a timer job which can be added to a Group. Timer jobs invoke the given function at the specified +// interval. Timer jobs are particularly useful to implement periodic syncs and cleanup actions. +// Timer jobs can optionally be triggered by an external Trigger with the WithTrigger option. +// This trigger can for example be passed between cells or between jobs in the same cell to allow for an additional +// invocation of the function. +// +// The interval between invocations is counted from the start of the last invocation. If the `fn` takes longer than the +// interval, its next invocation is not delayed. The `fn` is expected to stop as soon as the context passed to it +// expires. This is especially important for long running functions. The signal created by a Trigger is coalesced so +// multiple calls to trigger before the invocation takes place can result in just a single invocation. 
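+//
+// A hedged sketch of a periodic sync that can also be nudged on demand; the
+// job name, interval, and helper below are invented:
+//
+//	trig := NewTrigger()
+//	g.Add(Timer("resync", func(ctx context.Context) error {
+//		return resync(ctx) // hypothetical helper
+//	}, 5*time.Minute, WithTrigger(trig)))
+//
+//	// Elsewhere: request an early run; calls made before the run coalesce.
+//	trig.Trigger()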
+func Timer(name string, fn TimerFunc, interval time.Duration, opts ...timerOpt) Job {
+ if fn == nil {
+ panic("`fn` must not be nil")
+ }
+
+ job := &jobTimer{
+ name: name,
+ fn: fn,
+ interval: interval,
+ opts: opts,
+ }
+
+ return job
+}
+
+// TimerFunc is the func type invoked by a timer job. A TimerFunc is expected to return as soon as the ctx expires.
+type TimerFunc func(ctx context.Context) error
+
+type timerOpt func(*jobTimer)
+
+// Trigger can be used to trigger a timer job; trigger events are coalesced.
+type Trigger interface {
+ _trigger()
+ Trigger()
+}
+
+// NewTrigger creates a new trigger, which can be used to trigger a timer job.
+func NewTrigger() *trigger {
+ return &trigger{
+ c: make(chan struct{}, 1),
+ }
+}
+
+type trigger struct {
+ c chan struct{}
+}
+
+func (t *trigger) _trigger() {}
+
+func (t *trigger) Trigger() {
+ select {
+ case t.c <- struct{}{}:
+ default:
+ }
+}
+
+// WithTrigger option allows a user to specify a trigger, which if triggered will invoke the function of a timer
+// before the configured interval has expired.
+func WithTrigger(trig Trigger) timerOpt {
+ return func(jt *jobTimer) {
+ jt.trigger = trig.(*trigger)
+ }
+}
+
+type jobTimer struct {
+ name string
+ fn TimerFunc
+ opts []timerOpt
+
+ health cell.HealthReporter
+
+ interval time.Duration
+ trigger *trigger
+
+ // If not nil, call the shutdowner on error
+ shutdown hive.Shutdowner
+}
+
+func (jt *jobTimer) start(ctx context.Context, wg *sync.WaitGroup, scope cell.Scope, options options) {
+ defer wg.Done()
+
+ for _, opt := range jt.opts {
+ opt(jt)
+ }
+
+ jt.health = cell.GetHealthReporter(scope, "timer-job-"+jt.name)
+
+ l := options.logger.WithFields(logrus.Fields{
+ "name": jt.name,
+ "func": internal.FuncNameAndLocation(jt.fn),
+ })
+
+ timer := time.NewTicker(jt.interval)
+ defer timer.Stop()
+
+ var triggerChan chan struct{}
+ if jt.trigger != nil {
+ triggerChan = jt.trigger.c
+ }
+
+ l.Debug("Starting timer job")
+ jt.health.OK("Primed")
+
+ stat := &spanstat.SpanStat{}
+
+ for {
+ select {
+ case <-ctx.Done():
+ jt.health.Stopped("timer job context done")
+ return
+ case <-timer.C:
+ case <-triggerChan:
+ }
+
+ l.Debug("Timer job triggered")
+
+ stat.Start()
+
+ err := jt.fn(ctx)
+
+ total := stat.End(true).Total()
+ options.metrics.TimerRunDuration.WithLabelValues(jt.name).Observe(total.Seconds())
+ stat.Reset()
+
+ if err == nil {
+ jt.health.OK("OK (" + total.String() + ")")
+ l.Debug("Timer job finished")
+ } else if !errors.Is(err, context.Canceled) {
+ jt.health.Degraded("timer job errored", err)
+ l.WithError(err).Error("Timer job errored")
+
+ options.metrics.JobErrorsTotal.WithLabelValues(jt.name).Inc()
+ if jt.shutdown != nil {
+ jt.shutdown.Shutdown(hive.ShutdownWithError(err))
+ }
+ }
+
+ // If we exited because the ctx closed, we are not guaranteed to return
+ // above: the select can pick the timer or trigger signals over ctx.Done
+ // due to fair scheduling, so this check guarantees it.
+ if ctx.Err() != nil {
+ return
+ }
+ }
+}
+
+// Observer creates an observer job which can be added to a Group. Observer jobs invoke the given `fn` for each item
+// observed on `observable`. If the `observable` completes, the job stops. The context given to the observable is also
+// canceled once the group stops.
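+//
+// A hedged sketch; the observable, event type, and job name below are
+// invented:
+//
+//	var events stream.Observable[string] // e.g. provided by another cell
+//	g.Add(Observer[string]("log-events",
+//		func(ctx context.Context, ev string) error {
+//			fmt.Println("observed:", ev)
+//			return nil
+//		},
+//		events,
+//	))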
+// Observer creates an observer job which can be added to a Group. Observer jobs invoke the given `fn` for each item
+// observed on `observable`. If the `observable` completes, the job stops. The context given to the observable is also
+// canceled once the group stops.
+func Observer[T any](name string, fn ObserverFunc[T], observable stream.Observable[T], opts ...observerOpt[T]) Job {
+ if fn == nil {
+ panic("`fn` must not be nil")
+ }
+
+ job := &jobObserver[T]{
+ name: name,
+ fn: fn,
+ observable: observable,
+ opts: opts,
+ }
+
+ return job
+}
+
+// ObserverFunc is the func type invoked by observer jobs.
+// An ObserverFunc is expected to return as soon as ctx is canceled.
+type ObserverFunc[T any] func(ctx context.Context, event T) error
+
+type observerOpt[T any] func(*jobObserver[T])
+
+type jobObserver[T any] struct {
+ name string
+ fn ObserverFunc[T]
+ opts []observerOpt[T]
+
+ health cell.HealthReporter
+
+ observable stream.Observable[T]
+
+ // If not nil, call the shutdowner on error
+ shutdown hive.Shutdowner
+}
+
+func (jo *jobObserver[T]) start(ctx context.Context, wg *sync.WaitGroup, scope cell.Scope, options options) {
+ defer wg.Done()
+
+ for _, opt := range jo.opts {
+ opt(jo)
+ }
+
+ jo.health = cell.GetHealthReporter(scope, "observer-job-"+jo.name)
+ reportTicker := time.NewTicker(10 * time.Second)
+ defer reportTicker.Stop()
+
+ l := options.logger.WithFields(logrus.Fields{
+ "name": jo.name,
+ "func": internal.FuncNameAndLocation(jo.fn),
+ })
+
+ l.Debug("Observer job started")
+ jo.health.OK("Primed")
+ var msgCount uint64
+
+ done := make(chan struct{})
+
+ var (
+ stat = &spanstat.SpanStat{}
+ err error
+ )
+ jo.observable.Observe(ctx, func(t T) {
+ stat.Start()
+
+ err := jo.fn(ctx, t)
+
+ total := stat.End(true).Total()
+ options.metrics.ObserverRunDuration.WithLabelValues(jo.name).Observe(total.Seconds())
+ stat.Reset()
+
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ return
+ }
+
+ jo.health.Degraded("observer job errored", err)
+ l.WithError(err).Error("Observer job errored")
+ options.metrics.JobErrorsTotal.WithLabelValues(jo.name).Inc()
+ if jo.shutdown != nil {
+ jo.shutdown.Shutdown(hive.ShutdownWithError(
+ err,
+ ))
+ }
+ return
+ }
+
+ msgCount++
+
+ // Don't report health for every event, only when we have not done so for a bit
+ select {
+ case <-reportTicker.C:
+ jo.health.OK("OK (" + total.String() + ") [" + strconv.FormatUint(msgCount, 10) + "]")
+ default:
+ }
+ }, func(e error) {
+ err = e
+ close(done)
+ })
+
+ <-done
+
+ jo.health.Stopped("observer job done")
+ if err != nil {
+ l.WithError(err).Error("Observer job stopped with an error")
+ } else {
+ l.Debug("Observer job stopped")
+ }
+}
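And a sketch of the Observer variant above; stream.FromSlice is assumed to exist in the vendored stream package, and g is again a job Group.

// Illustrative only.
events := stream.FromSlice([]string{"add", "update", "delete"})
g.Add(Observer[string]("event-printer", func(ctx context.Context, e string) error {
	fmt.Println("observed:", e) // invoked once per observed item
	return nil
}, events))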
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/job/metrics.go b/vendor/github.com/cilium/cilium/pkg/hive/job/metrics.go
new file mode 100644
index 0000000000..18e7448ed2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/job/metrics.go
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package job
+
+import (
+ "github.com/cilium/cilium/pkg/metrics"
+ "github.com/cilium/cilium/pkg/metrics/metric"
+)
+
+type jobMetrics struct {
+ JobErrorsTotal metric.Vec[metric.Counter]
+ OneShotRunDuration metric.Vec[metric.Observer]
+ TimerRunDuration metric.Vec[metric.Observer]
+ ObserverRunDuration metric.Vec[metric.Observer]
+}
+
+func newJobMetrics() *jobMetrics {
+ return &jobMetrics{
+ JobErrorsTotal: metric.NewCounterVec(metric.CounterOpts{
+ ConfigName: metrics.Namespace + "jobs_errors_total",
+ Namespace: metrics.Namespace,
+ Subsystem: "jobs",
+ Name: "errors_total",
+ Help: "The number of errors encountered while running jobs",
+ }, []string{"job"}),
+ OneShotRunDuration: metric.NewHistogramVec(metric.HistogramOpts{
+ ConfigName: metrics.Namespace + "jobs_one_shot_run_seconds",
+ Namespace: metrics.Namespace,
+ Subsystem: "jobs",
+ Name: "one_shot_run_seconds",
+ Help: "The run time of a one shot job",
+ }, []string{"job"}),
+ TimerRunDuration: metric.NewHistogramVec(metric.HistogramOpts{
+ ConfigName: metrics.Namespace + "jobs_timer_run_seconds",
+ Namespace: metrics.Namespace,
+ Subsystem: "jobs",
+ Name: "timer_run_seconds",
+ Help: "The run time of a timer job",
+ }, []string{"job"}),
+ ObserverRunDuration: metric.NewHistogramVec(metric.HistogramOpts{
+ ConfigName: metrics.Namespace + "jobs_observer_run_seconds",
+ Namespace: metrics.Namespace,
+ Subsystem: "jobs",
+ Name: "observer_run_seconds",
+ Help: "The run time of an observer job",
+ }, []string{"job"}),
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/cache/allocator.go b/vendor/github.com/cilium/cilium/pkg/identity/cache/allocator.go
new file mode 100644
index 0000000000..c637cea34a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/identity/cache/allocator.go
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cache
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "path"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/allocator"
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/identity/key"
+ "github.com/cilium/cilium/pkg/idpool"
+ clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
+ "github.com/cilium/cilium/pkg/k8s/identitybackend"
+ "github.com/cilium/cilium/pkg/kvstore"
+ kvstoreallocator "github.com/cilium/cilium/pkg/kvstore/allocator"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/metrics"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/stream"
+)
+
+var (
+ // IdentitiesPath is the path to where identities are stored in the
+ // key-value store.
+ IdentitiesPath = path.Join(kvstore.BaseKeyPrefix, "state", "identities", "v1")
+)
+
+// CachingIdentityAllocator manages the allocation of identities for both
+// global and local identities.
+type CachingIdentityAllocator struct {
+ // IdentityAllocator is an allocator for security identities from the
+ // kvstore.
+ IdentityAllocator *allocator.Allocator
+
+ // globalIdentityAllocatorInitialized is closed whenever the global identity
+ // allocator is initialized.
+ globalIdentityAllocatorInitialized chan struct{}
+
+ localIdentities *localIdentityCache
+
+ localNodeIdentities *localIdentityCache
+
+ identitiesPath string
+
+ // This field exists to hand out references used for either sending or
+ // receiving. It should not be used directly without first converting it
+ // to an AllocatorEventSendChan or AllocatorEventRecvChan.
+ events allocator.AllocatorEventChan
+ watcher identityWatcher
+
+ // setupMutex synchronizes InitIdentityAllocator() and Close()
+ setupMutex lock.Mutex
+
+ owner IdentityAllocatorOwner
+}
+
+// IdentityAllocatorOwner is the interface the owner of an identity allocator
+// must implement
+type IdentityAllocatorOwner interface {
+ // UpdateIdentities will be called when identities have changed
+ //
+ // The caller is responsible for making sure the same identity
+ // is not present in both 'added' and 'deleted', so that they
+ // can be processed in either order.
+ UpdateIdentities(added, deleted IdentityCache)
+
+ // GetNodeSuffix must return the node specific suffix to use
+ GetNodeSuffix() string
+}
+
+// IdentityAllocator is any type which is responsible for allocating security
+// identities based on sets of labels, and caching information about identities
+// locally.
+type IdentityAllocator interface {
+ // Identity changes are observable.
+ stream.Observable[IdentityChange]
+
+ // WaitForInitialGlobalIdentities waits for the initial set of global
+ // security identities to have been received.
+ WaitForInitialGlobalIdentities(context.Context) error
+
+ // AllocateIdentity allocates an identity described by the specified labels.
+ // A possible previously used numeric identity for these labels can be passed
+ // in as the last parameter; identity.InvalidIdentity must be passed if no
+ // previous numeric identity exists.
+ AllocateIdentity(context.Context, labels.Labels, bool, identity.NumericIdentity) (*identity.Identity, bool, error)
+
+ // Release is the reverse operation of AllocateIdentity() and releases the
+ // specified identity.
+ Release(context.Context, *identity.Identity, bool) (released bool, err error)
+
+ // ReleaseSlice is the slice variant of Release().
+ ReleaseSlice(context.Context, []*identity.Identity) error
+
+ // LookupIdentity returns the identity that corresponds to the given
+ // labels.
+ LookupIdentity(ctx context.Context, lbls labels.Labels) *identity.Identity
+
+ // LookupIdentityByID returns the identity that corresponds to the given
+ // numeric identity.
+ LookupIdentityByID(ctx context.Context, id identity.NumericIdentity) *identity.Identity
+
+ // GetIdentityCache returns the current cache of identities that the
+ // allocator has allocated. The caller should not modify the resulting
+ // identities by pointer.
+ GetIdentityCache() IdentityCache
+
+ // GetIdentities returns a copy of the current cache of identities.
+ GetIdentities() IdentitiesModel
+
+ // WithholdLocalIdentities holds a set of numeric identities out of the local
+ // allocation pool(s). Once withheld, a numeric identity can only be used
+ // when explicitly requested via AllocateIdentity(..., oldNID).
+ WithholdLocalIdentities(nids []identity.NumericIdentity)
+
+ // UnwithholdLocalIdentities removes numeric identities from the withheld set,
+ // freeing them for general allocation.
+ UnwithholdLocalIdentities(nids []identity.NumericIdentity)
+}
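A hedged sketch of the allocate/release contract this interface describes; alloc may be any implementation, such as the CachingIdentityAllocator above.

// Illustrative only.
func allocateAndRelease(ctx context.Context, alloc IdentityAllocator, lbls labels.Labels) error {
	// Wait until the initial set of global identities has been received.
	if err := alloc.WaitForInitialGlobalIdentities(ctx); err != nil {
		return err
	}
	// No previously used numeric identity exists, so pass identity.InvalidIdentity.
	id, isNew, err := alloc.AllocateIdentity(ctx, lbls, true, identity.InvalidIdentity)
	if err != nil {
		return err
	}
	_ = isNew // true if this call created the identity
	// Every successful AllocateIdentity must eventually be balanced by a Release.
	_, err = alloc.Release(ctx, id, true)
	return err
}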
+// InitIdentityAllocator creates the global identity allocator. Only the first
+// invocation of this function will have an effect. The caller must have
+// initialized well known identities before calling this (by calling
+// identity.InitWellKnownIdentities()).
+// The client is only used by the CRD identity allocator currently.
+// Returns a channel which is closed when initialization of the allocator is
+// completed.
+// TODO: identity backends are initialized directly in this function, pulling
+// in dependencies on kvstore and k8s. It would be better to decouple this,
+// since the backends are an interface.
+func (m *CachingIdentityAllocator) InitIdentityAllocator(client clientset.Interface) <-chan struct{} {
+ m.setupMutex.Lock()
+ defer m.setupMutex.Unlock()
+
+ if m.IdentityAllocator != nil {
+ log.Panic("InitIdentityAllocator() in succession without calling Close()")
+ }
+
+ log.Info("Initializing identity allocator")
+
+ minID := idpool.ID(identity.GetMinimalAllocationIdentity())
+ maxID := idpool.ID(identity.GetMaximumAllocationIdentity())
+
+ log.WithFields(map[string]interface{}{
+ "min": minID,
+ "max": maxID,
+ "cluster-id": option.Config.ClusterID,
+ }).Info("Allocating identities between range")
+
+ // In the case of the allocator being closed, we need to create a new events channel
+ // and start a new watch.
+ if m.events == nil {
+ m.events = make(allocator.AllocatorEventChan, eventsQueueSize)
+ m.watcher.watch(m.events)
+ }
+
+ // Asynchronously set up the global identity allocator since it connects
+ // to the kvstore.
+ go func(owner IdentityAllocatorOwner, events allocator.AllocatorEventSendChan, minID, maxID idpool.ID) {
+ m.setupMutex.Lock()
+ defer m.setupMutex.Unlock()
+
+ var (
+ backend allocator.Backend
+ err error
+ )
+
+ switch option.Config.IdentityAllocationMode {
+ case option.IdentityAllocationModeKVstore:
+ log.Debug("Identity allocation backed by KVStore")
+ backend, err = kvstoreallocator.NewKVStoreBackend(m.identitiesPath, owner.GetNodeSuffix(), &key.GlobalIdentity{}, kvstore.Client())
+ if err != nil {
+ log.WithError(err).Fatal("Unable to initialize kvstore backend for identity allocation")
+ }
+
+ case option.IdentityAllocationModeCRD:
+ log.Debug("Identity allocation backed by CRD")
+ backend, err = identitybackend.NewCRDBackend(identitybackend.CRDBackendConfiguration{
+ Store: nil,
+ Client: client,
+ KeyFunc: (&key.GlobalIdentity{}).PutKeyFromMap,
+ })
+ if err != nil {
+ log.WithError(err).Fatal("Unable to initialize Kubernetes CRD backend for identity allocation")
+ }
+
+ default:
+ log.Fatalf("Unsupported identity allocation mode %s", option.Config.IdentityAllocationMode)
+ }
+
+ a, err := allocator.NewAllocator(&key.GlobalIdentity{}, backend,
+ allocator.WithMax(maxID), allocator.WithMin(minID),
+ allocator.WithEvents(events),
+ allocator.WithMasterKeyProtection(),
+ allocator.WithPrefixMask(idpool.ID(option.Config.ClusterID<<identity.GetClusterIDShift())))
[...]
+ if idp > identity.MaxNumericIdentity {
+ return nil, false, fmt.Errorf("%d: numeric identity too large", idp)
+ }
+
+ if option.Config.Debug {
+ log.WithFields(logrus.Fields{
+ logfields.Identity: idp,
+ logfields.IdentityLabels: lbls.String(),
+ "isNew": isNew,
+ "isNewLocally": isNewLocally,
+ }).Debug("Resolved identity")
+ }
+
+ return identity.NewIdentity(identity.NumericIdentity(idp), lbls), isNew, nil
+}
+
+func (m *CachingIdentityAllocator) WithholdLocalIdentities(nids []identity.NumericIdentity) {
+ log.WithField(logfields.Identity, nids).Debug("Withholding numeric identities for later restoration")
+
+ // The allocators will return any identities that are not in-scope.
+ nids = m.localIdentities.withhold(nids) + nids = m.localNodeIdentities.withhold(nids) + if len(nids) > 0 { + log.WithField(logfields.Identity, nids).Error("Attempt to restore invalid numeric identities.") + } +} + +func (m *CachingIdentityAllocator) UnwithholdLocalIdentities(nids []identity.NumericIdentity) { + log.WithField(logfields.Identity, nids).Debug("Unwithholding numeric identities") + + // The allocators will ignore any identities that are not in-scope. + m.localIdentities.unwithhold(nids) + m.localNodeIdentities.unwithhold(nids) +} + +// Release is the reverse operation of AllocateIdentity() and releases the +// identity again. This function may result in kvstore operations. +// After the last user has released the ID, the returned lastUse value is true. +func (m *CachingIdentityAllocator) Release(ctx context.Context, id *identity.Identity, notifyOwner bool) (released bool, err error) { + defer func() { + if released { + if id.ID.HasLocalScope() { + metrics.Identity.WithLabelValues(identity.NodeLocalIdentityType).Dec() + } else if id.ID.HasRemoteNodeScope() { + metrics.Identity.WithLabelValues(identity.RemoteNodeIdentityType).Dec() + } else if id.ID.IsReservedIdentity() { + metrics.Identity.WithLabelValues(identity.ReservedIdentityType).Dec() + } else { + metrics.Identity.WithLabelValues(identity.ClusterLocalIdentityType).Dec() + } + } + if m.owner != nil && released && notifyOwner { + deleted := IdentityCache{ + id.ID: id.LabelArray, + } + m.owner.UpdateIdentities(nil, deleted) + } + }() + + // Ignore reserved identities. + if id.IsReserved() { + return false, nil + } + + switch identity.ScopeForLabels(id.Labels) { + case identity.IdentityScopeLocal: + return m.localIdentities.release(id, notifyOwner), nil + case identity.IdentityScopeRemoteNode: + return m.localNodeIdentities.release(id, notifyOwner), nil + } + + // This will block until the kvstore can be accessed and all identities + // were successfully synced + err = m.WaitForInitialGlobalIdentities(ctx) + if err != nil { + return false, err + } + + if m.IdentityAllocator == nil { + return false, fmt.Errorf("allocator not initialized") + } + + // Rely on the eventual Kv-Store events for delete + // notifications of kv-store allocated identities. Even if an + // ID is no longer used locally, it may still be used by + // remote nodes, so we can't rely on the locally computed + // "lastUse". + return m.IdentityAllocator.Release(ctx, &key.GlobalIdentity{LabelArray: id.LabelArray}) +} + +// ReleaseSlice attempts to release a set of identities. It is a helper +// function that may be useful for cleaning up multiple identities in paths +// where several identities may be allocated and another error means that they +// should all be released. +func (m *CachingIdentityAllocator) ReleaseSlice(ctx context.Context, identities []*identity.Identity) error { + var err error + for _, id := range identities { + if id == nil { + continue + } + _, err2 := m.Release(ctx, id, false) + if err2 != nil { + log.WithError(err2).WithFields(logrus.Fields{ + logfields.Identity: id, + }).Error("Failed to release identity") + err = err2 + } + } + return err +} + +// WatchRemoteIdentities returns a RemoteCache instance which can be later +// started to watch identities in another kvstore and sync them to the local +// identity cache. remoteName should be unique unless replacing an existing +// remote's backend. When cachedPrefix is set, identities are assumed to be +// stored under the "cilium/cache" prefix, and the watcher is adapted accordingly. 
+func (m *CachingIdentityAllocator) WatchRemoteIdentities(remoteName string, backend kvstore.BackendOperations, cachedPrefix bool) (*allocator.RemoteCache, error) {
+ <-m.globalIdentityAllocatorInitialized
+
+ prefix := m.identitiesPath
+ if cachedPrefix {
+ prefix = path.Join(kvstore.StateToCachePrefix(prefix), remoteName)
+ }
+
+ remoteAllocatorBackend, err := kvstoreallocator.NewKVStoreBackend(prefix, m.owner.GetNodeSuffix(), &key.GlobalIdentity{}, backend)
+ if err != nil {
+ return nil, fmt.Errorf("error setting up remote allocator backend: %s", err)
+ }
+
+ remoteAlloc, err := allocator.NewAllocator(&key.GlobalIdentity{}, remoteAllocatorBackend,
+ allocator.WithEvents(m.IdentityAllocator.GetEvents()), allocator.WithoutGC(), allocator.WithoutAutostart())
+ if err != nil {
+ return nil, fmt.Errorf("unable to initialize remote Identity Allocator: %s", err)
+ }
+
+ return m.IdentityAllocator.NewRemoteCache(remoteName, remoteAlloc), nil
+}
+
+func (m *CachingIdentityAllocator) RemoveRemoteIdentities(name string) {
+ if m.IdentityAllocator != nil {
+ m.IdentityAllocator.RemoveRemoteKVStore(name)
+ }
+}
+
+type IdentityChangeKind string
+
+const (
+ IdentityChangeSync IdentityChangeKind = IdentityChangeKind(allocator.AllocatorChangeSync)
+ IdentityChangeUpsert IdentityChangeKind = IdentityChangeKind(allocator.AllocatorChangeUpsert)
+ IdentityChangeDelete IdentityChangeKind = IdentityChangeKind(allocator.AllocatorChangeDelete)
+)
+
+type IdentityChange struct {
+ Kind IdentityChangeKind
+ ID identity.NumericIdentity
+ Labels labels.Labels
+}
+
+// Observe the identity changes. Conforms to stream.Observable.
+// Replays the current state of the cache when subscribing.
+func (m *CachingIdentityAllocator) Observe(ctx context.Context, next func(IdentityChange), complete func(error)) {
+ // This short-lived goroutine waits for the global identity allocator to become ready
+ // before starting to observe the underlying allocator for changes.
+ // m.IdentityAllocator is backed by a stream.FuncObservable, which will start its own
+ // goroutine. Therefore, the current goroutine stops and frees the lock on the setupMutex after the registration.
+ go func() {
+ if err := m.WaitForInitialGlobalIdentities(ctx); err != nil {
+ complete(ctx.Err())
+ return
+ }
+
+ m.setupMutex.Lock()
+ defer m.setupMutex.Unlock()
+
+ if m.IdentityAllocator == nil {
+ complete(errors.New("allocator no longer initialized"))
+ return
+ }
+
+ // Observe the underlying allocator for changes and map the events to identities.
+ stream.Map[allocator.AllocatorChange, IdentityChange]( + m.IdentityAllocator, + func(change allocator.AllocatorChange) IdentityChange { + return IdentityChange{ + Kind: IdentityChangeKind(change.Kind), + ID: identity.NumericIdentity(change.ID), + Labels: mapLabels(change.Key), + } + }, + ).Observe(ctx, next, complete) + }() +} + +func mapLabels(allocatorKey allocator.AllocatorKey) labels.Labels { + var idLabels labels.Labels = nil + + if allocatorKey != nil { + idLabels = labels.Labels{} + for k, v := range allocatorKey.GetAsMap() { + label := labels.ParseLabel(k + "=" + v) + idLabels[label.Key] = label + } + } + + return idLabels +} diff --git a/vendor/github.com/cilium/cilium/pkg/identity/cache/cache.go b/vendor/github.com/cilium/cilium/pkg/identity/cache/cache.go new file mode 100644 index 0000000000..7a0b4c1941 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/identity/cache/cache.go @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package cache + +import ( + "context" + "reflect" + + "github.com/cilium/cilium/api/v1/models" + "github.com/cilium/cilium/pkg/allocator" + "github.com/cilium/cilium/pkg/identity" + "github.com/cilium/cilium/pkg/identity/key" + identitymodel "github.com/cilium/cilium/pkg/identity/model" + "github.com/cilium/cilium/pkg/idpool" + "github.com/cilium/cilium/pkg/kvstore" + "github.com/cilium/cilium/pkg/labels" + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" +) + +var ( + log = logging.DefaultLogger.WithField(logfields.LogSubsys, "identity-cache") +) + +// IdentityCache is a cache of identity to labels mapping +type IdentityCache map[identity.NumericIdentity]labels.LabelArray + +// IdentitiesModel is a wrapper so that we can implement the sort.Interface +// to sort the slice by ID +type IdentitiesModel []*models.Identity + +// Less returns true if the element in index `i` is lower than the element +// in index `j` +func (s IdentitiesModel) Less(i, j int) bool { + return s[i].ID < s[j].ID +} + +// FromIdentityCache populates the provided model from an identity cache. 
+func (s IdentitiesModel) FromIdentityCache(cache IdentityCache) IdentitiesModel { + for id, lbls := range cache { + s = append(s, identitymodel.CreateModel(&identity.Identity{ + ID: id, + Labels: lbls.Labels(), + })) + } + return s +} + +// GetIdentityCache returns a cache of all known identities +func (m *CachingIdentityAllocator) GetIdentityCache() IdentityCache { + log.Debug("getting identity cache for identity allocator manager") + cache := IdentityCache{} + + if m.isGlobalIdentityAllocatorInitialized() { + m.IdentityAllocator.ForeachCache(func(id idpool.ID, val allocator.AllocatorKey) { + if val != nil { + if gi, ok := val.(*key.GlobalIdentity); ok { + cache[identity.NumericIdentity(id)] = gi.LabelArray + } else { + log.Warningf("Ignoring unknown identity type '%s': %+v", + reflect.TypeOf(val), val) + } + } + }) + } + + identity.IterateReservedIdentities(func(ni identity.NumericIdentity, id *identity.Identity) { + cache[ni] = id.Labels.LabelArray() + }) + + for _, identity := range m.localIdentities.GetIdentities() { + cache[identity.ID] = identity.Labels.LabelArray() + } + for _, identity := range m.localNodeIdentities.GetIdentities() { + cache[identity.ID] = identity.Labels.LabelArray() + } + + return cache +} + +// GetIdentities returns all known identities +func (m *CachingIdentityAllocator) GetIdentities() IdentitiesModel { + identities := IdentitiesModel{} + + if m.isGlobalIdentityAllocatorInitialized() { + m.IdentityAllocator.ForeachCache(func(id idpool.ID, val allocator.AllocatorKey) { + if gi, ok := val.(*key.GlobalIdentity); ok { + identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(id), gi.LabelArray) + identities = append(identities, identitymodel.CreateModel(identity)) + } + + }) + } + identity.IterateReservedIdentities(func(ni identity.NumericIdentity, id *identity.Identity) { + identities = append(identities, identitymodel.CreateModel(id)) + }) + + for _, v := range m.localIdentities.GetIdentities() { + identities = append(identities, identitymodel.CreateModel(v)) + } + for _, v := range m.localNodeIdentities.GetIdentities() { + identities = append(identities, identitymodel.CreateModel(v)) + } + + return identities +} + +type identityWatcher struct { + owner IdentityAllocatorOwner +} + +// collectEvent records the 'event' as an added or deleted identity, +// and makes sure that any identity is present in only one of the sets +// (added or deleted). +func collectEvent(event allocator.AllocatorEvent, added, deleted IdentityCache) bool { + id := identity.NumericIdentity(event.ID) + // Only create events have the key + if event.Typ == kvstore.EventTypeCreate { + if gi, ok := event.Key.(*key.GlobalIdentity); ok { + // Un-delete the added ID if previously + // 'deleted' so that collected events can be + // processed in any order. + delete(deleted, id) + added[id] = gi.LabelArray + return true + } + log.Warningf("collectEvent: Ignoring unknown identity type '%s': %+v", + reflect.TypeOf(event.Key), event.Key) + return false + } + // Reverse an add when subsequently deleted + delete(added, id) + // record the id deleted even if an add was reversed, as the + // id may also have previously existed, in which case the + // result is not no-op! 
+ deleted[id] = labels.LabelArray{} + + return true +} + +// watch starts the identity watcher +func (w *identityWatcher) watch(events allocator.AllocatorEventRecvChan) { + + go func() { + for { + added := IdentityCache{} + deleted := IdentityCache{} + First: + for { + event, ok := <-events + // Wait for one identity add or delete or stop + if !ok { + // 'events' was closed + return + } + // Collect first added and deleted labels + switch event.Typ { + case kvstore.EventTypeCreate, kvstore.EventTypeDelete: + if collectEvent(event, added, deleted) { + // First event collected + break First + } + default: + // Ignore modify events + } + } + + More: + for { + // see if there is more, but do not wait nor stop + select { + case event, ok := <-events: + if !ok { + // 'events' was closed + break More + } + // Collect more added and deleted labels + switch event.Typ { + case kvstore.EventTypeCreate, kvstore.EventTypeDelete: + collectEvent(event, added, deleted) + default: + // Ignore modify events + } + default: + // No more events available without blocking + break More + } + } + // Issue collected updates + w.owner.UpdateIdentities(added, deleted) // disjoint sets + } + }() +} + +// isGlobalIdentityAllocatorInitialized returns true if m.IdentityAllocator is not nil. +// Note: This does not mean that the identities have been synchronized, +// see WaitForInitialGlobalIdentities to wait for a fully populated cache. +func (m *CachingIdentityAllocator) isGlobalIdentityAllocatorInitialized() bool { + select { + case <-m.globalIdentityAllocatorInitialized: + return m.IdentityAllocator != nil + default: + return false + } +} + +// LookupIdentity looks up the identity by its labels but does not create it. +// This function will first search through the local cache, then the caches for +// remote kvstores and finally fall back to the main kvstore. +// May return nil for lookups if the allocator has not yet been synchronized. +func (m *CachingIdentityAllocator) LookupIdentity(ctx context.Context, lbls labels.Labels) *identity.Identity { + if reservedIdentity := identity.LookupReservedIdentityByLabels(lbls); reservedIdentity != nil { + return reservedIdentity + } + + switch identity.ScopeForLabels(lbls) { + case identity.IdentityScopeLocal: + return m.localIdentities.lookup(lbls) + case identity.IdentityScopeRemoteNode: + return m.localNodeIdentities.lookup(lbls) + } + + if !m.isGlobalIdentityAllocatorInitialized() { + return nil + } + + lblArray := lbls.LabelArray() + id, err := m.IdentityAllocator.GetIncludeRemoteCaches(ctx, &key.GlobalIdentity{LabelArray: lblArray}) + if err != nil { + return nil + } + if id > identity.MaxNumericIdentity { + return nil + } + + if id == idpool.NoID { + return nil + } + + return identity.NewIdentityFromLabelArray(identity.NumericIdentity(id), lblArray) +} + +var unknownIdentity = identity.NewIdentity(identity.IdentityUnknown, labels.Labels{labels.IDNameUnknown: labels.NewLabel(labels.IDNameUnknown, "", labels.LabelSourceReserved)}) + +// LookupIdentityByID returns the identity by ID. This function will first +// search through the local cache, then the caches for remote kvstores and +// finally fall back to the main kvstore +// May return nil for lookups if the allocator has not yet been synchronized. 
+func (m *CachingIdentityAllocator) LookupIdentityByID(ctx context.Context, id identity.NumericIdentity) *identity.Identity {
+ if id == identity.IdentityUnknown {
+ return unknownIdentity
+ }
+
+ if identity := identity.LookupReservedIdentity(id); identity != nil {
+ return identity
+ }
+
+ switch id.Scope() {
+ case identity.IdentityScopeLocal:
+ return m.localIdentities.lookupByID(id)
+ case identity.IdentityScopeRemoteNode:
+ return m.localNodeIdentities.lookupByID(id)
+ }
+
+ if !m.isGlobalIdentityAllocatorInitialized() {
+ return nil
+ }
+
+ allocatorKey, err := m.IdentityAllocator.GetByIDIncludeRemoteCaches(ctx, idpool.ID(id))
+ if err != nil {
+ return nil
+ }
+
+ if gi, ok := allocatorKey.(*key.GlobalIdentity); ok {
+ return identity.NewIdentityFromLabelArray(id, gi.LabelArray)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/cache/local.go b/vendor/github.com/cilium/cilium/pkg/identity/cache/local.go
new file mode 100644
index 0000000000..6a2a67919d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/identity/cache/local.go
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cache
+
+import (
+ "fmt"
+
+ "github.com/cilium/cilium/pkg/allocator"
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/identity/key"
+ "github.com/cilium/cilium/pkg/idpool"
+ "github.com/cilium/cilium/pkg/kvstore"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+type localIdentityCache struct {
+ mutex lock.RWMutex
+ identitiesByID map[identity.NumericIdentity]*identity.Identity
+ identitiesByLabels map[string]*identity.Identity
+ nextNumericIdentity identity.NumericIdentity
+ scope identity.NumericIdentity
+ minID identity.NumericIdentity
+ maxID identity.NumericIdentity
+ events allocator.AllocatorEventSendChan
+
+ // withheldIdentities is a set of identities that should be considered unavailable for allocation,
+ // but not yet allocated.
+ // They are used during agent restart, where local identities are restored to prevent unnecessary
+ // ID flapping on restart.
+ //
+ // If an old nID is passed to lookupOrCreate(), then it is allowed to use a withheld entry here. Otherwise
+ // it must allocate a new ID not in this set.
+ withheldIdentities map[identity.NumericIdentity]struct{}
+}
+
+func newLocalIdentityCache(scope, minID, maxID identity.NumericIdentity, events allocator.AllocatorEventSendChan) *localIdentityCache {
+ return &localIdentityCache{
+ identitiesByID: map[identity.NumericIdentity]*identity.Identity{},
+ identitiesByLabels: map[string]*identity.Identity{},
+ nextNumericIdentity: minID,
+ scope: scope,
+ minID: minID,
+ maxID: maxID,
+ events: events,
+ withheldIdentities: map[identity.NumericIdentity]struct{}{},
+ }
+}
+
+func (l *localIdentityCache) bumpNextNumericIdentity() {
+ if l.nextNumericIdentity == l.maxID {
+ l.nextNumericIdentity = l.minID
+ } else {
+ l.nextNumericIdentity++
+ }
+}
+
+// getNextFreeNumericIdentity returns the next available numeric identity or an error.
+// If idCandidate has the local scope and is available, it will be returned instead of
+// searching for a new numeric identity.
+// The l.mutex must be held
+func (l *localIdentityCache) getNextFreeNumericIdentity(idCandidate identity.NumericIdentity) (identity.NumericIdentity, error) {
+ // Try first with the given candidate
+ if idCandidate.Scope() == l.scope {
+ if _, taken := l.identitiesByID[idCandidate]; !taken {
+ // leave nextNumericIdentity as is; allocated identities will be skipped anyway
+ log.Debugf("Reallocated restored local identity: %d", idCandidate)
+ return idCandidate, nil
+ } else {
+ log.WithField(logfields.Identity, idCandidate).Debug("Requested local identity not available to allocate")
+ }
+ }
+ firstID := l.nextNumericIdentity
+ for {
+ idCandidate = l.nextNumericIdentity | l.scope
+ _, taken := l.identitiesByID[idCandidate]
+ _, withheld := l.withheldIdentities[idCandidate]
+ if !taken && !withheld {
+ l.bumpNextNumericIdentity()
+ return idCandidate, nil
+ }
+
+ l.bumpNextNumericIdentity()
+ if l.nextNumericIdentity == firstID {
+ // Desperation: no local identities left (unlikely). If there are withheld
+ // but not-taken identities, claim one of them.
+ for withheldID := range l.withheldIdentities {
+ if _, taken := l.identitiesByID[withheldID]; !taken {
+ delete(l.withheldIdentities, withheldID)
+ log.WithField(logfields.Identity, withheldID).Warn("Local identity allocator full; claiming first withheld identity. This may cause momentary policy drops")
+ return withheldID, nil
+ }
+ }
+
+ return 0, fmt.Errorf("out of local identity space")
+ }
+ }
+}
+
+// lookupOrCreate searches for the existence of a local identity with the given
+// labels. If it exists, the reference count is incremented and the identity is
+// returned. If it does not exist, a new identity is created with a unique
+// numeric identity. All identities returned by lookupOrCreate() must be
+// released again via localIdentityCache.release().
+// A possible previously used numeric identity for these labels can be passed
+// in as the 'oldNID' parameter; identity.InvalidIdentity must be passed if no
+// previous numeric identity exists. 'oldNID' will be reallocated if available.
+func (l *localIdentityCache) lookupOrCreate(lbls labels.Labels, oldNID identity.NumericIdentity, notifyOwner bool) (*identity.Identity, bool, error) {
+ // Not converting to string saves an allocation, as byte key lookups into
+ // string maps are optimized by the compiler, see
+ // https://github.com/golang/go/issues/3512.
+ repr := lbls.SortedList()
+
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ if id, ok := l.identitiesByLabels[string(repr)]; ok {
+ id.ReferenceCount++
+ return id, false, nil
+ }
+
+ numericIdentity, err := l.getNextFreeNumericIdentity(oldNID)
+ if err != nil {
+ return nil, false, err
+ }
+
+ id := &identity.Identity{
+ ID: numericIdentity,
+ Labels: lbls,
+ LabelArray: lbls.LabelArray(),
+ ReferenceCount: 1,
+ }
+
+ l.identitiesByLabels[string(repr)] = id
+ l.identitiesByID[numericIdentity] = id
+
+ if l.events != nil && notifyOwner {
+ l.events <- allocator.AllocatorEvent{
+ Typ: kvstore.EventTypeCreate,
+ ID: idpool.ID(id.ID),
+ Key: &key.GlobalIdentity{LabelArray: id.LabelArray},
+ }
+ }
+
+ return id, true, nil
+}
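A small sketch of the lookupOrCreate/release contract above; the scope, ID range, and label are illustrative values, and labels.NewLabelsFromModel is assumed from the labels package.

// Illustrative only.
c := newLocalIdentityCache(identity.IdentityScopeLocal, 100, 200, nil)
lbls := labels.NewLabelsFromModel([]string{"cidr:10.0.0.0/24"})

id, isNew, _ := c.lookupOrCreate(lbls, identity.InvalidIdentity, false)
_ = isNew // true on first allocation
// A second lookupOrCreate with the same labels returns the same identity with
// its reference count bumped, so it would have to be released twice.
c.release(id, false)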
+// release releases a local identity from the cache. true is returned when the
+// last use of the identity has been released and the identity has been
+// forgotten.
+func (l *localIdentityCache) release(id *identity.Identity, notifyOwner bool) bool {
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ if id, ok := l.identitiesByID[id.ID]; ok {
+ switch {
+ case id.ReferenceCount > 1:
+ id.ReferenceCount--
+ return false
+
+ case id.ReferenceCount == 1:
+ // Release is only attempted once, when the reference count hits
+ // the last use
+ delete(l.identitiesByLabels, string(id.Labels.SortedList()))
+ delete(l.identitiesByID, id.ID)
+
+ if l.events != nil && notifyOwner {
+ l.events <- allocator.AllocatorEvent{
+ Typ: kvstore.EventTypeDelete,
+ ID: idpool.ID(id.ID),
+ }
+ }
+
+ return true
+ }
+ }
+
+ return false
+}
+
+// withhold marks the nids as unavailable. Any out-of-scope identities are returned.
+func (l *localIdentityCache) withhold(nids []identity.NumericIdentity) []identity.NumericIdentity {
+ if len(nids) == 0 {
+ return nil
+ }
+
+ unused := make([]identity.NumericIdentity, 0, len(nids))
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+ for _, nid := range nids {
+ if nid.Scope() != l.scope {
+ unused = append(unused, nid)
+ continue
+ }
+ l.withheldIdentities[nid] = struct{}{}
+ }
+
+ return unused
+}
+
+func (l *localIdentityCache) unwithhold(nids []identity.NumericIdentity) {
+ if len(nids) == 0 {
+ return
+ }
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+ for _, nid := range nids {
+ if nid.Scope() != l.scope {
+ continue
+ }
+ delete(l.withheldIdentities, nid)
+ }
+}
+
+// lookup searches for a local identity matching the given labels and returns
+// it. If found, the reference count is NOT incremented and thus release must
+// NOT be called.
+func (l *localIdentityCache) lookup(lbls labels.Labels) *identity.Identity {
+ l.mutex.RLock()
+ defer l.mutex.RUnlock()
+
+ if id, ok := l.identitiesByLabels[string(lbls.SortedList())]; ok {
+ return id
+ }
+
+ return nil
+}
+
+// lookupByID searches for a local identity matching the given ID and returns
+// it. If found, the reference count is NOT incremented and thus release must
+// NOT be called.
+func (l *localIdentityCache) lookupByID(id identity.NumericIdentity) *identity.Identity {
+ l.mutex.RLock()
+ defer l.mutex.RUnlock()
+
+ if id, ok := l.identitiesByID[id]; ok {
+ return id
+ }
+
+ return nil
+}
+
+// GetIdentities returns all local identities
+func (l *localIdentityCache) GetIdentities() map[identity.NumericIdentity]*identity.Identity {
+ cache := map[identity.NumericIdentity]*identity.Identity{}
+
+ l.mutex.RLock()
+ defer l.mutex.RUnlock()
+
+ for key, id := range l.identitiesByID {
+ cache[key] = id
+ }
+
+ return cache
+}
+
+// close removes the events channel.
+func (l *localIdentityCache) close() { + l.mutex.Lock() + defer l.mutex.Unlock() + + l.events = nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/doc.go b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/doc.go new file mode 100644 index 0000000000..fbf1b56fae --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/doc.go @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package identitymanager tracks which global identities are being used by +// the currently running cilium-agent +package identitymanager diff --git a/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/log.go b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/log.go new file mode 100644 index 0000000000..b038e68ce1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/log.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package identitymanager + +import ( + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" +) + +var ( + log = logging.DefaultLogger.WithField(logfields.LogSubsys, "identitymanager") +) diff --git a/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/manager.go b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/manager.go new file mode 100644 index 0000000000..81766a099b --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/manager.go @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package identitymanager + +import ( + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/api/v1/models" + "github.com/cilium/cilium/pkg/identity" + "github.com/cilium/cilium/pkg/identity/model" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/logging/logfields" +) + +var ( + // GlobalIdentityManager is a singleton instance of an IdentityManager, used + // for easy updating / tracking lifecycles of identities on the local node + // without having to pass around a specific instance of an IdentityManager + // throughout Cilium. + GlobalIdentityManager = NewIdentityManager() +) + +// IdentityManager caches information about a set of identities, currently a +// reference count of how many users there are for each identity. +type IdentityManager struct { + mutex lock.RWMutex + identities map[identity.NumericIdentity]*identityMetadata + observers map[Observer]struct{} +} + +type identityMetadata struct { + identity *identity.Identity + refCount uint +} + +// NewIdentityManager returns an initialized IdentityManager. +func NewIdentityManager() *IdentityManager { + return &IdentityManager{ + identities: make(map[identity.NumericIdentity]*identityMetadata), + observers: make(map[Observer]struct{}), + } +} + +// Add inserts the identity into the GlobalIdentityManager. +func Add(identity *identity.Identity) { + GlobalIdentityManager.Add(identity) +} + +// Remove deletes the identity from the GlobalIdentityManager. +func Remove(identity *identity.Identity) { + GlobalIdentityManager.Remove(identity) +} + +// RemoveAll deletes all identities from the GlobalIdentityManager. +func RemoveAll() { + GlobalIdentityManager.RemoveAll() +} + +// Add inserts the identity into the identity manager. 
If the identity is +// already in the identity manager, the reference count for the identity is +// incremented. +func (idm *IdentityManager) Add(identity *identity.Identity) { + log.WithFields(logrus.Fields{ + logfields.Identity: identity, + }).Debug("Adding identity to the identity manager") + + idm.mutex.Lock() + defer idm.mutex.Unlock() + idm.add(identity) +} + +func (idm *IdentityManager) add(identity *identity.Identity) { + + if identity == nil { + return + } + + idMeta, exists := idm.identities[identity.ID] + if !exists { + idm.identities[identity.ID] = &identityMetadata{ + identity: identity, + refCount: 1, + } + for o := range idm.observers { + o.LocalEndpointIdentityAdded(identity) + } + + } else { + idMeta.refCount++ + } +} + +// RemoveOldAddNew removes old from the identity manager and inserts new +// into the IdentityManager. +// Caller must have previously added the old identity with Add(). +// This is a no-op if both identities have the same numeric ID. +func (idm *IdentityManager) RemoveOldAddNew(old, new *identity.Identity) { + idm.mutex.Lock() + defer idm.mutex.Unlock() + + if old == nil && new == nil { + return + } + // The host endpoint will always retain its reserved ID, but its labels may + // change so we need to update its identity. + if old != nil && new != nil && old.ID == new.ID && new.ID != identity.ReservedIdentityHost { + return + } + + log.WithFields(logrus.Fields{ + "old": old, + "new": new, + }).Debug("removing old and adding new identity") + + idm.remove(old) + idm.add(new) +} + +// RemoveOldAddNew removes old from and inserts new into the +// GlobalIdentityManager. +func RemoveOldAddNew(old, new *identity.Identity) { + GlobalIdentityManager.RemoveOldAddNew(old, new) +} + +// RemoveAll removes all identities. +func (idm *IdentityManager) RemoveAll() { + idm.mutex.Lock() + defer idm.mutex.Unlock() + + for id := range idm.identities { + idm.remove(idm.identities[id].identity) + } +} + +// Remove deletes the identity from the identity manager. If the identity is +// already in the identity manager, the reference count for the identity is +// decremented. If the identity is not in the cache, this is a no-op. If the +// ref count becomes zero, the identity is removed from the cache. +func (idm *IdentityManager) Remove(identity *identity.Identity) { + log.WithFields(logrus.Fields{ + logfields.Identity: identity, + }).Debug("Removing identity from the identity manager") + + idm.mutex.Lock() + defer idm.mutex.Unlock() + idm.remove(identity) +} + +func (idm *IdentityManager) remove(identity *identity.Identity) { + + if identity == nil { + return + } + + idMeta, exists := idm.identities[identity.ID] + if !exists { + log.WithFields(logrus.Fields{ + logfields.Identity: identity, + }).Error("removing identity not added to the identity manager!") + return + } + idMeta.refCount-- + if idMeta.refCount == 0 { + delete(idm.identities, identity.ID) + for o := range idm.observers { + o.LocalEndpointIdentityRemoved(identity) + } + } + +} + +// GetIdentityModels returns the API representation of the IdentityManager. 
+func (idm *IdentityManager) GetIdentityModels() []*models.IdentityEndpoints { + idm.mutex.RLock() + defer idm.mutex.RUnlock() + + identities := make([]*models.IdentityEndpoints, 0, len(idm.identities)) + + for _, v := range idm.identities { + identities = append(identities, &models.IdentityEndpoints{ + Identity: model.CreateModel(v.identity), + RefCount: int64(v.refCount), + }) + } + + return identities +} + +func (idm *IdentityManager) subscribe(o Observer) { + idm.mutex.Lock() + defer idm.mutex.Unlock() + idm.observers[o] = struct{}{} +} + +// GetIdentityModels returns the API model of all identities in the +// GlobalIdentityManager. +func GetIdentityModels() []*models.IdentityEndpoints { + return GlobalIdentityManager.GetIdentityModels() +} + +// IdentitiesModel is a wrapper so that we can implement the sort.Interface +// to sort the slice by ID +type IdentitiesModel []*models.IdentityEndpoints + +// Less returns true if the element in index `i` is lower than the element +// in index `j` +func (s IdentitiesModel) Less(i, j int) bool { + return s[i].Identity.ID < s[j].Identity.ID +} + +// Subscribe adds the specified Observer to the global identity manager, to be +// notified upon changes to local identity usage. +func Subscribe(o Observer) { + GlobalIdentityManager.subscribe(o) +} diff --git a/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/observer.go b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/observer.go new file mode 100644 index 0000000000..c5eacc078c --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/observer.go @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package identitymanager + +import ( + "github.com/cilium/cilium/pkg/identity" +) + +// Observer can sign up to receive events whenever local identities are removed. +type Observer interface { + // LocalEndpointIdentityAdded is called when an identity first becomes + // used on the node. Implementations must ensure that the callback + // returns within a reasonable period. + LocalEndpointIdentityAdded(*identity.Identity) + + // LocalEndpointIdentityRemoved is called when an identity is no longer + // in use on the node. Implementations must ensure that the callback + // returns within a reasonable period. + LocalEndpointIdentityRemoved(*identity.Identity) +} diff --git a/vendor/github.com/cilium/cilium/pkg/identity/key/global_identity.go b/vendor/github.com/cilium/cilium/pkg/identity/key/global_identity.go new file mode 100644 index 0000000000..96582a5d4f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/identity/key/global_identity.go @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package key + +import ( + "maps" + "strings" + + "github.com/cilium/cilium/pkg/allocator" + "github.com/cilium/cilium/pkg/labels" +) + +const ( + // MetadataKeyBackendKey is the key used to store the backend key. + MetadataKeyBackendKey = iota +) + +// GlobalIdentity is the structure used to store an identity +type GlobalIdentity struct { + labels.LabelArray + + // metadata contains metadata that are stored for example by the backends. 
+ metadata map[any]any
+}
+
+// GetKey encodes an Identity as string
+func (gi *GlobalIdentity) GetKey() string {
+ var str strings.Builder
+ for _, l := range gi.LabelArray {
+ str.Write(l.FormatForKVStore())
+ }
+ return str.String()
+}
+
+// GetAsMap encodes a GlobalIdentity as a map of keys to values. The keys will
+// include a source delimited by a ':'. This output is parseable by PutKeyFromMap.
+func (gi *GlobalIdentity) GetAsMap() map[string]string {
+ return gi.StringMap()
+}
+
+// PutKey decodes an Identity from its string representation
+func (gi *GlobalIdentity) PutKey(v string) allocator.AllocatorKey {
+ return &GlobalIdentity{LabelArray: labels.NewLabelArrayFromSortedList(v)}
+}
+
+// PutKeyFromMap decodes an Identity from a map of key to value. Output
+// from GetAsMap can be parsed.
+// Note: Map2Labels will parse the ':' separated label source from
+// the keys because the source parameter is ""
+func (gi *GlobalIdentity) PutKeyFromMap(v map[string]string) allocator.AllocatorKey {
+ return &GlobalIdentity{LabelArray: labels.Map2Labels(v, "").LabelArray()}
+}
+
+// PutValue puts metadata inside the global identity for the given 'key' with
+// the given 'value'.
+func (gi *GlobalIdentity) PutValue(key, value any) allocator.AllocatorKey {
+ newMap := map[any]any{}
+ if gi.metadata != nil {
+ newMap = maps.Clone(gi.metadata)
+ }
+ newMap[key] = value
+ return &GlobalIdentity{
+ LabelArray: gi.LabelArray,
+ metadata: newMap,
+ }
+}
+
+// Value returns the value stored in the metadata map.
+func (gi *GlobalIdentity) Value(key any) any {
+ return gi.metadata[key]
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/model/identity.go b/vendor/github.com/cilium/cilium/pkg/identity/model/identity.go
new file mode 100644
index 0000000000..4b99df3aaa
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/identity/model/identity.go
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package model
+
+import (
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/labels"
+)
+
+func NewIdentityFromModel(base *models.Identity) *identity.Identity {
+ if base == nil {
+ return nil
+ }
+
+ id := &identity.Identity{
+ ID: identity.NumericIdentity(base.ID),
+ Labels: make(labels.Labels, len(base.Labels)),
+ }
+ for _, v := range base.Labels {
+ lbl := labels.ParseLabel(v)
+ id.Labels[lbl.Key] = lbl
+ }
+ id.Sanitize()
+
+ return id
+}
+
+func CreateModel(id *identity.Identity) *models.Identity {
+ if id == nil {
+ return nil
+ }
+
+ ret := &models.Identity{
+ ID: int64(id.ID),
+ Labels: make([]string, 0, len(id.Labels)),
+ }
+
+ for _, v := range id.Labels {
+ ret.Labels = append(ret.Labels, v.String())
+ }
+ return ret
+}
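A round-trip sketch of the two converters above; the numeric ID and label are made-up values, and labels.NewLabelsFromModel is assumed from the labels package.

// Illustrative only.
id := identity.NewIdentity(identity.NumericIdentity(12345),
	labels.NewLabelsFromModel([]string{"k8s:app=foo"}))
m := CreateModel(id)            // *models.Identity{ID: 12345, Labels: ["k8s:app=foo"]}
back := NewIdentityFromModel(m) // parses the labels back and calls Sanitize()
_ = back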
diff --git a/vendor/github.com/cilium/cilium/pkg/idpool/idpool.go b/vendor/github.com/cilium/cilium/pkg/idpool/idpool.go
new file mode 100644
index 0000000000..0429b1d9cb
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/idpool/idpool.go
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package idpool
+
+import (
+ "strconv"
+
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+// ID is a numeric identifier
+type ID uint64
+
+// NoID is a special ID that represents "no ID available"
+const NoID ID = 0
+
+// String returns the string representation of an allocated ID
+func (i ID) String() string {
+ return strconv.FormatUint(uint64(i), 10)
+}
+
+// IDPool represents a pool of IDs that can be managed concurrently
+// via local usage and external events.
+//
+// An intermediate state (leased) is introduced to the life cycle
+// of an ID in the pool, in order to prevent lost updates to the
+// pool that can occur as a result of employing both management schemes
+// simultaneously.
+// Local usage of an ID becomes a two stage process of leasing
+// the ID from the pool, and later, Use()ing or Release()ing the ID on
+// the pool upon successful or unsuccessful usage respectively.
+//
+// The table below shows the state transitions in the ID's life cycle.
+// In the case of LeaseAvailableID() the ID is returned rather
+// than provided as an input to the operation.
+// All ID's begin in the available state.
+/*
+---------------------------------------------------------------------
+|state\event | LeaseAvailableID | Release | Use | Insert | Remove |
+---------------------------------------------------------------------
+|1 available | 2 | * | * | * | 3 |
+---------------------------------------------------------------------
+|2 leased | ** | 1 | 3 | * | 3 |
+---------------------------------------------------------------------
+|3 unavailable | ** | * | * | 1 | * |
+---------------------------------------------------------------------
+* The event has no effect.
+** This is guaranteed never to occur.
+*/
+type IDPool struct {
+ // mutex protects all IDPool data structures
+ mutex lock.Mutex
+
+ // min is the lower limit when leasing IDs. The pool will never
+ // return an ID less than this value.
+ minID ID
+
+ // max is the upper limit when leasing IDs. The pool will never
+ // return an ID greater than this value.
+ maxID ID
+
+ // idCache is a cache of IDs backing the pool.
+ idCache *idCache
+}
+
+// NewIDPool returns a new ID pool
+func NewIDPool(minID ID, maxID ID) IDPool {
+ return IDPool{
+ minID: minID,
+ maxID: maxID,
+ idCache: newIDCache(minID, maxID),
+ }
+}
+
+// LeaseAvailableID returns an available ID at random from the pool.
+// Returns NoID if there is no available ID in the pool.
+func (p *IDPool) LeaseAvailableID() ID {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+
+ return p.idCache.leaseAvailableID()
+}
+
+// AllocateID returns a random available ID. Unlike LeaseAvailableID, the ID is
+// immediately marked for use and there is no need to call Use().
+func (p *IDPool) AllocateID() ID {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+
+ return p.idCache.allocateID()
+}
+
+// Release returns a leased ID back to the pool.
+// This operation accounts for IDs that were previously leased
+// from the pool but were unused, e.g. if allocation was unsuccessful.
+// Thus, it has no effect if the ID is not currently leased in the
+// pool, or the pool has since been refreshed.
+//
+// Returns true if the ID was returned back to the pool as
+// a result of this call.
+func (p *IDPool) Release(id ID) bool {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+
+ return p.idCache.release(id)
+}
+
+// Use makes a leased ID unavailable in the pool and has no effect
+// otherwise. Returns true if the ID was made unavailable
+// as a result of this call.
+func (p *IDPool) Use(id ID) bool {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+
+ return p.idCache.use(id)
+}
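A condensed sketch of the lease/use/release life cycle from the state table above; the ID range, the success flag, and tryToPersist are illustrative.

// Illustrative only.
pool := idpool.NewIDPool(1, 100)

id := pool.LeaseAvailableID()
if id == idpool.NoID {
	return // pool exhausted
}

succeeded := tryToPersist(id) // hypothetical use of the leased ID
if succeeded {
	pool.Use(id) // leased -> unavailable
} else {
	pool.Release(id) // leased -> available again
}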
+// Insert makes an unavailable ID available in the pool
+// and has no effect otherwise. Returns true if the ID
+// was added back to the pool.
+func (p *IDPool) Insert(id ID) bool {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+
+ return p.idCache.insert(id)
+}
+
+// Remove makes an ID unavailable in the pool.
+// Returns true if the ID was previously available in the pool.
+func (p *IDPool) Remove(id ID) bool {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+
+ return p.idCache.remove(id)
+}
+
+type idCache struct {
+ // ids is the set of IDs available in this idCache.
+ ids map[ID]struct{}
+
+ // leased is the set of IDs that are leased in this idCache.
+ leased map[ID]struct{}
+}
+
+func newIDCache(minID ID, maxID ID) *idCache {
+ n := int(maxID - minID + 1)
+ if n < 0 {
+ n = 0
+ }
+
+ c := &idCache{
+ ids: make(map[ID]struct{}, n),
+ leased: make(map[ID]struct{}),
+ }
+
+ for id := minID; id < maxID+1; id++ {
+ c.ids[id] = struct{}{}
+ }
+
+ return c
+}
+
+// allocateID returns a random available ID without leasing it
+func (c *idCache) allocateID() ID {
+ for id := range c.ids {
+ delete(c.ids, id)
+ return id
+ }
+
+ return NoID
+}
+
+// leaseAvailableID returns a random available ID.
+func (c *idCache) leaseAvailableID() ID {
+ id := c.allocateID()
+ if id == NoID {
+ return NoID
+ }
+
+ // Mark as leased
+ c.leased[id] = struct{}{}
+
+ return id
+}
+
+// release makes the ID available again if it is currently
+// leased and has no effect otherwise. Returns true if the
+// ID was made available as a result of this call.
+func (c *idCache) release(id ID) bool {
+ if _, exists := c.leased[id]; !exists {
+ return false
+ }
+
+ delete(c.leased, id)
+ c.insert(id)
+
+ return true
+}
+
+// use makes the ID unavailable if it is currently
+// leased and has no effect otherwise. Returns true if the
+// ID was made unavailable as a result of this call.
+func (c *idCache) use(id ID) bool {
+ if _, exists := c.leased[id]; !exists {
+ return false
+ }
+
+ delete(c.leased, id)
+ return true
+}
+
+// insert adds the ID into the cache if it is currently unavailable.
+// Returns true if the ID was added to the cache.
+func (c *idCache) insert(id ID) bool {
+ if _, ok := c.ids[id]; ok {
+ return false
+ }
+
+ if _, exists := c.leased[id]; exists {
+ return false
+ }
+
+ c.ids[id] = struct{}{}
+ return true
+}
+
+// remove removes the ID from the cache.
+// Returns true if the ID was available in the cache.
+func (c *idCache) remove(id ID) bool {
+ delete(c.leased, id)
+
+ if _, ok := c.ids[id]; ok {
+ delete(c.ids, id)
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/ipcache/types/entries.go b/vendor/github.com/cilium/cilium/pkg/ipcache/types/entries.go
new file mode 100644
index 0000000000..1377e25b1f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/ipcache/types/entries.go
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "bytes"
+ "net"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+type IPListEntrySlice []*models.IPListEntry
+
+func (s IPListEntrySlice) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Less sorts the IPListEntry objects by CIDR prefix then IP address.
+// Given that the same IP cannot map to more than one identity, no further
+// sorting is performed.
+func (s IPListEntrySlice) Less(i, j int) bool {
+	_, iNet, _ := net.ParseCIDR(*s[i].Cidr)
+	_, jNet, _ := net.ParseCIDR(*s[j].Cidr)
+	iPrefixSize, _ := iNet.Mask.Size()
+	jPrefixSize, _ := jNet.Mask.Size()
+	if iPrefixSize == jPrefixSize {
+		return bytes.Compare(iNet.IP, jNet.IP) < 0
+	}
+	return iPrefixSize < jPrefixSize
+}
+
+func (s IPListEntrySlice) Len() int {
+	return len(s)
+}
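+
+// Len, Swap, and Less above make IPListEntrySlice satisfy sort.Interface,
+// so entries can be ordered with the standard library. A brief illustrative
+// sketch, where entries is a hypothetical []*models.IPListEntry:
+//
+//	sort.Sort(IPListEntrySlice(entries))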
diff --git a/vendor/github.com/cilium/cilium/pkg/ipcache/types/types.go b/vendor/github.com/cilium/cilium/pkg/ipcache/types/types.go
new file mode 100644
index 0000000000..d5b3c9e73c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/ipcache/types/types.go
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+	"context"
+	"net"
+	"net/netip"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/cilium/cilium/pkg/identity"
+	"github.com/cilium/cilium/pkg/identity/cache"
+)
+
+// PolicyHandler is responsible for handling identity updates into the core
+// policy engine. See SelectorCache.UpdateIdentities() for more details.
+type PolicyHandler interface {
+	UpdateIdentities(added, deleted cache.IdentityCache, wg *sync.WaitGroup)
+}
+
+// DatapathHandler is responsible for ensuring that policy updates in the
+// core policy engine are pushed into the underlying BPF policy maps, to ensure
+// that the policies are actively being enforced in the datapath for any new
+// identities that have been updated using 'PolicyHandler'.
+//
+// Wait on the returned sync.WaitGroup to ensure that the operation is complete
+// before updating the datapath's IPCache maps.
+type DatapathHandler interface {
+	UpdatePolicyMaps(context.Context, *sync.WaitGroup) *sync.WaitGroup
+}
+
+// ResourceID identifies a unique copy of a resource that provides a source for
+// information tied to an IP address in the IPCache.
+type ResourceID string
+
+// ResourceKind determines the source of the ResourceID. Typically this is the
+// short name for the k8s resource.
+type ResourceKind string
+
+var (
+	ResourceKindCNP      = ResourceKind("cnp")
+	ResourceKindCCNP     = ResourceKind("ccnp")
+	ResourceKindDaemon   = ResourceKind("daemon")
+	ResourceKindEndpoint = ResourceKind("ep")
+	ResourceKindNetpol   = ResourceKind("netpol")
+	ResourceKindNode     = ResourceKind("node")
+)
+
+// NewResourceID returns a ResourceID populated with the standard fields for
+// uniquely identifying a source of IPCache information.
+func NewResourceID(kind ResourceKind, namespace, name string) ResourceID {
+	str := strings.Builder{}
+	str.Grow(len(kind) + 1 + len(namespace) + 1 + len(name))
+	str.WriteString(string(kind))
+	str.WriteRune('/')
+	str.WriteString(namespace)
+	str.WriteRune('/')
+	str.WriteString(name)
+	return ResourceID(str.String())
+}
+
+// TunnelPeer is the IP address of the host associated with this prefix. This is
+// typically used to establish a tunnel, e.g. in tunnel mode or for encryption.
+// This type implements ipcache.IPMetadata
+type TunnelPeer struct{ netip.Addr }
+
+func (t TunnelPeer) IP() net.IP {
+	return t.AsSlice()
+}
+
+// EncryptKey is the identity of the encryption key.
+// This type implements ipcache.IPMetadata
+type EncryptKey uint8
+
+const EncryptKeyEmpty = EncryptKey(0)
+
+func (e EncryptKey) IsValid() bool {
+	return e != EncryptKeyEmpty
+}
+
+func (e EncryptKey) Uint8() uint8 {
+	return uint8(e)
+}
+
+func (e EncryptKey) String() string {
+	return strconv.Itoa(int(e))
+}
+
+// RequestedIdentity is a desired numeric identity for the prefix. When the
+// prefix is next injected, this numeric ID will be requested from the local
+// allocator. If the allocator can accommodate that request, it will do so.
+// In order for this to be useful, the prefix must not already have an identity
+// (or its set of labels must have changed), and that numeric identity must
+// be free.
+// Thus, the numeric ID should have already been held aside in the allocator
+// using WithholdLocalIdentities(). That will ensure the numeric ID remains free
+// for the prefix to request.
+type RequestedIdentity identity.NumericIdentity
+
+func (id RequestedIdentity) IsValid() bool {
+	return id != 0
+}
+
+func (id RequestedIdentity) ID() identity.NumericIdentity {
+	return identity.NumericIdentity(id)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/annotate.go b/vendor/github.com/cilium/cilium/pkg/k8s/annotate.go
new file mode 100644
index 0000000000..984e50b828
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/annotate.go
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package k8s
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	k8sTypes "k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/kubernetes"
+
+	"github.com/cilium/cilium/pkg/annotation"
+	"github.com/cilium/cilium/pkg/controller"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	nodeTypes "github.com/cilium/cilium/pkg/node/types"
+)
+
+type nodeAnnotation = map[string]string
+
+var nodeAnnotationControllerGroup = controller.NewGroup("update-k8s-node-annotations")
+
+func prepareNodeAnnotation(nd nodeTypes.Node, encryptKey uint8) nodeAnnotation {
+	annotationMap := map[string]fmt.Stringer{
+		annotation.V4CIDRName:     nd.IPv4AllocCIDR,
+		annotation.V6CIDRName:     nd.IPv6AllocCIDR,
+		annotation.V4HealthName:   nd.IPv4HealthIP,
+		annotation.V6HealthName:   nd.IPv6HealthIP,
+		annotation.V4IngressName:  nd.IPv4IngressIP,
+		annotation.V6IngressName:  nd.IPv6IngressIP,
+		annotation.CiliumHostIP:   nd.GetCiliumInternalIP(false),
+		annotation.CiliumHostIPv6: nd.GetCiliumInternalIP(true),
+	}
+
+	annotations := map[string]string{}
+	for k, v := range annotationMap {
+		if !reflect.ValueOf(v).IsNil() {
+			annotations[k] = v.String()
+		}
+	}
+	if encryptKey != 0 {
+		annotations[annotation.CiliumEncryptionKey] = strconv.FormatUint(uint64(encryptKey), 10)
+	}
+	return annotations
+}
+
+func updateNodeAnnotation(c kubernetes.Interface, nodeName string, annotation nodeAnnotation) error {
+	if len(annotation) == 0 {
+		return nil
+	}
+
+	raw, err := json.Marshal(annotation)
+	if err != nil {
+		return err
+	}
+	patch := []byte(fmt.Sprintf(`{"metadata":{"annotations":%s}}`, raw))
+
+	_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status")
+
+	return err
+}
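+
+// For reference, the strategic merge patch built above has this shape (the
+// annotation key and CIDR below are illustrative values only, not names
+// defined by this package):
+//
+//	{"metadata":{"annotations":{"example.io/v4-pod-cidr":"10.0.1.0/24"}}}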
+
+// AnnotateNode writes the v4 and v6 CIDRs and health IPs as annotations on
+// the given k8s node. In case of failure while updating the node, this
+// function will spawn a goroutine to retry the node update indefinitely.
+func AnnotateNode(cs kubernetes.Interface, nodeName string, nd nodeTypes.Node, encryptKey uint8) (nodeAnnotation, error) {
+	scopedLog := log.WithFields(logrus.Fields{
+		logfields.NodeName:       nodeName,
+		logfields.V4Prefix:       nd.IPv4AllocCIDR,
+		logfields.V6Prefix:       nd.IPv6AllocCIDR,
+		logfields.V4HealthIP:     nd.IPv4HealthIP,
+		logfields.V6HealthIP:     nd.IPv6HealthIP,
+		logfields.V4IngressIP:    nd.IPv4IngressIP,
+		logfields.V6IngressIP:    nd.IPv6IngressIP,
+		logfields.V4CiliumHostIP: nd.GetCiliumInternalIP(false),
+		logfields.V6CiliumHostIP: nd.GetCiliumInternalIP(true),
+		logfields.Key:            encryptKey,
+	})
+	scopedLog.Debug("Updating node annotations with node CIDRs")
+	annotation := prepareNodeAnnotation(nd, encryptKey)
+	controller.NewManager().UpdateController("update-k8s-node-annotations",
+		controller.ControllerParams{
+			Group: nodeAnnotationControllerGroup,
+			DoFunc: func(_ context.Context) error {
+				err := updateNodeAnnotation(cs, nodeName, annotation)
+				if err != nil {
+					scopedLog.WithFields(logrus.Fields{}).WithError(err).Warn("Unable to patch node resource with annotation")
+				}
+				return err
+			},
+		})
+
+	return annotation, nil
+}
+
+func prepareRemoveNodeAnnotationsPayload(annotation nodeAnnotation) ([]byte, error) {
+	deleteAnnotations := []JSONPatch{}
+
+	for key := range annotation {
+		deleteAnnotations = append(deleteAnnotations, JSONPatch{
+			OP:   "remove",
+			Path: "/metadata/annotations/" + encodeJsonElement(key),
+		})
+	}
+
+	return json.Marshal(deleteAnnotations)
+}
+
+func RemoveNodeAnnotations(c kubernetes.Interface, nodeName string, annotation nodeAnnotation) error {
+	patch, err := prepareRemoveNodeAnnotationsPayload(annotation)
+	if err != nil {
+		return err
+	}
+	_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, k8sTypes.JSONPatchType, patch, metav1.PatchOptions{}, "status")
+	return err
+}
+
+func encodeJsonElement(element string) string {
+	return strings.Replace(element, "/", "~1", -1)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/cache_status.go b/vendor/github.com/cilium/cilium/pkg/k8s/cache_status.go
new file mode 100644
index 0000000000..ce03f422e3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/cache_status.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package k8s
+
+// CacheStatus allows waiting for k8s caches to synchronize.
+type CacheStatus chan struct{}
+
+// Synchronized returns true if caches have been synchronized at least once.
+//
+// Returns true for an uninitialized [CacheStatus].
+func (cs CacheStatus) Synchronized() bool {
+	if cs == nil {
+		return true
+	}
+
+	select {
+	case <-cs:
+		return true
+	default:
+		return false
+	}
+}
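+
+// A minimal usage sketch: the cache owner closes the channel once the k8s
+// caches have synchronized, and readers poll Synchronized(). The
+// waitForCacheSync call below is a hypothetical stand-in for the owner's
+// synchronization logic.
+//
+//	status := make(CacheStatus)
+//	go func() {
+//		waitForCacheSync()
+//		close(status)
+//	}()
+//	for !status.Synchronized() {
+//		time.Sleep(100 * time.Millisecond)
+//	}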
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/cilium_node.go b/vendor/github.com/cilium/cilium/pkg/k8s/cilium_node.go
new file mode 100644
index 0000000000..06a736bb85
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/cilium_node.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package k8s
+
+import (
+	ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+	nodeTypes "github.com/cilium/cilium/pkg/node/types"
+)
+
+// IsLocalCiliumNode returns true if the given CiliumNode object refers to the
+// CiliumNode object representing the local node.
+func IsLocalCiliumNode(n *ciliumv2.CiliumNode) bool {
+	return n != nil && n.GetName() == nodeTypes.GetName()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/cell.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/cell.go
new file mode 100644
index 0000000000..1dac2c6ec3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/cell.go
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/sirupsen/logrus"
+	apiext_clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+	apiext_fake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/fake"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/util/connrotation"
+
+	"github.com/cilium/cilium/pkg/controller"
+	"github.com/cilium/cilium/pkg/hive/cell"
+	cilium_clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
+	cilium_fake "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake"
+	k8smetrics "github.com/cilium/cilium/pkg/k8s/metrics"
+	slim_apiextclientsetscheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme"
+	slim_apiext_clientset "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-clientset"
+	slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+	slim_metav1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1"
+	slim_clientset "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned"
+	slim_fake "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake"
+	k8sversion "github.com/cilium/cilium/pkg/k8s/version"
+	"github.com/cilium/cilium/pkg/logging"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	"github.com/cilium/cilium/pkg/version"
+)
+
+// client.Cell provides Clientset, a composition of the clientsets for the
+// Kubernetes resources used by Cilium.
+var Cell = cell.Module(
+	"k8s-client",
+	"Kubernetes Client",
+
+	cell.Config(defaultConfig),
+	cell.Provide(newClientset),
+)
+
+var k8sHeartbeatControllerGroup = controller.NewGroup("k8s-heartbeat")
+
+// Type aliases for the clientsets to avoid name collision on 'Clientset' when composing them.
+type (
+	KubernetesClientset = kubernetes.Clientset
+	SlimClientset       = slim_clientset.Clientset
+	APIExtClientset     = slim_apiext_clientset.Clientset
+	CiliumClientset     = cilium_clientset.Clientset
+)
+
+// Clientset is a composition of the different client sets used by Cilium.
+type Clientset interface {
+	kubernetes.Interface
+	apiext_clientset.Interface
+	cilium_clientset.Interface
+	Getters
+
+	// Slim returns the slim client, which contains some of the same APIs as the
+	// normal kubernetes client, but with slimmed down messages to reduce memory
+	// usage. Prefer the slim version when caching messages.
+	Slim() slim_clientset.Interface
+
+	// IsEnabled returns true if Kubernetes support is enabled and the
+	// clientset can be used.
+	IsEnabled() bool
+
+	// Disable disables the client. Panics if called after the clientset has been
+	// started.
+	Disable()
+
+	// Config returns the configuration used to create this client.
+	Config() Config
+
+	// RestConfig returns a deep copy of the rest configuration.
+	RestConfig() *rest.Config
+}
+
+// compositeClientset implements the Clientset using real clients.
+type compositeClientset struct {
+	started  bool
+	disabled bool
+
+	*KubernetesClientset
+	*APIExtClientset
+	*CiliumClientset
+	clientsetGetters
+
+	controller    *controller.Manager
+	slim          *SlimClientset
+	config        Config
+	log           logrus.FieldLogger
+	closeAllConns func()
+	restConfig    *rest.Config
+}
+
+func newClientset(lc cell.Lifecycle, log logrus.FieldLogger, cfg Config) (Clientset, error) {
+	if !cfg.isEnabled() {
+		return &compositeClientset{disabled: true}, nil
+	}
+
+	if cfg.K8sAPIServer != "" &&
+		!strings.HasPrefix(cfg.K8sAPIServer, "http") {
+		cfg.K8sAPIServer = "http://" + cfg.K8sAPIServer // default to HTTP
+	}
+
+	client := compositeClientset{
+		log:        log,
+		controller: controller.NewManager(),
+		config:     cfg,
+	}
+
+	restConfig, err := createConfig(cfg.K8sAPIServer, cfg.K8sKubeConfigPath, cfg.K8sClientQPS, cfg.K8sClientBurst)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create k8s client rest configuration: %w", err)
+	}
+	client.restConfig = restConfig
+	defaultCloseAllConns := setDialer(cfg, restConfig)
+
+	httpClient, err := rest.HTTPClientFor(restConfig)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create k8s REST client: %w", err)
+	}
+
+	// We are implementing the same logic as Kubelet, see
+	// https://github.com/kubernetes/kubernetes/blob/v1.24.0-beta.0/cmd/kubelet/app/server.go#L852.
+	if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 {
+		client.closeAllConns = defaultCloseAllConns
+	} else {
+		client.closeAllConns = func() {
+			utilnet.CloseIdleConnectionsFor(restConfig.Transport)
+		}
+	}
+
+	// Slim and K8s clients use protobuf marshalling.
+	restConfig.ContentConfig.ContentType = `application/vnd.kubernetes.protobuf`
+
+	client.slim, err = slim_clientset.NewForConfigAndClient(restConfig, httpClient)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create slim k8s client: %w", err)
+	}
+
+	client.APIExtClientset, err = slim_apiext_clientset.NewForConfigAndClient(restConfig, httpClient)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create apiext k8s client: %w", err)
+	}
+
+	client.KubernetesClientset, err = kubernetes.NewForConfigAndClient(restConfig, httpClient)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create k8s client: %w", err)
+	}
+
+	client.clientsetGetters = clientsetGetters{&client}
+
+	// The cilium client uses JSON marshalling.
+	restConfig.ContentConfig.ContentType = `application/json`
+	client.CiliumClientset, err = cilium_clientset.NewForConfigAndClient(restConfig, httpClient)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create cilium k8s client: %w", err)
+	}
+
+	lc.Append(cell.Hook{
+		OnStart: client.onStart,
+		OnStop:  client.onStop,
+	})
+
+	return &client, nil
+}
+
+func (c *compositeClientset) Slim() slim_clientset.Interface {
+	return c.slim
+}
+
+func (c *compositeClientset) Discovery() discovery.DiscoveryInterface {
+	return c.KubernetesClientset.Discovery()
+}
+
+func (c *compositeClientset) IsEnabled() bool {
+	return c != nil && c.config.isEnabled() && !c.disabled
+}
+
+func (c *compositeClientset) Disable() {
+	if c.started {
+		panic("Clientset.Disable() called after it had been started")
+	}
+	c.disabled = true
+}
+
+func (c *compositeClientset) Config() Config {
+	return c.config
+}
+
+func (c *compositeClientset) RestConfig() *rest.Config {
+	return rest.CopyConfig(c.restConfig)
+}
+
+func (c *compositeClientset) onStart(startCtx cell.HookContext) error {
+	if !c.IsEnabled() {
+		return nil
+	}
+
+	if err := c.waitForConn(startCtx); err != nil {
+		return err
+	}
+	c.startHeartbeat()
+
+	// Update the global K8s clients, K8s version and the capabilities.
+	if err := k8sversion.Update(c, c.config.EnableK8sAPIDiscovery); err != nil {
+		return err
+	}
+
+	if !k8sversion.Capabilities().MinimalVersionMet {
+		return fmt.Errorf("k8s version (%v) is not meeting the minimal requirement (%v)",
+			k8sversion.Version(), k8sversion.MinimalVersionConstraint)
+	}
+
+	c.started = true
+
+	return nil
+}
+
+func (c *compositeClientset) onStop(stopCtx cell.HookContext) error {
+	if c.IsEnabled() {
+		c.controller.RemoveAllAndWait()
+		c.closeAllConns()
+	}
+	c.started = false
+	return nil
+}
+
+func (c *compositeClientset) startHeartbeat() {
+	restClient := c.KubernetesClientset.RESTClient()
+
+	timeout := c.config.K8sHeartbeatTimeout
+	if timeout == 0 {
+		return
+	}
+
+	heartBeat := func(ctx context.Context) error {
+		// Kubelet performs a Get of its own Node object [0]. That seems
+		// excessive in our case, because it transfers more data than a Get
+		// of /healthz does. For this reason we perform a Get on `/healthz`
+		// instead of a Get on a node.
+		//
+		// [0] https://github.com/kubernetes/kubernetes/blob/v1.17.3/pkg/kubelet/kubelet_node_status.go#L423
+		res := restClient.Get().Resource("healthz").Do(ctx)
+		return res.Error()
+	}
+
+	c.controller.UpdateController("k8s-heartbeat",
+		controller.ControllerParams{
+			Group: k8sHeartbeatControllerGroup,
+			DoFunc: func(context.Context) error {
+				runHeartbeat(
+					c.log,
+					heartBeat,
+					timeout,
+					c.closeAllConns,
+				)
+				return nil
+			},
+			RunInterval: timeout,
+		})
}
+
+// createConfig creates a rest.Config for connecting to k8s api-server.
+//
+// The precedence of the configuration selection is the following:
+// 1. kubeCfgPath
+// 2. apiServerURL (https if specified)
+// 3. rest.InClusterConfig().
+func createConfig(apiServerURL, kubeCfgPath string, qps float32, burst int) (*rest.Config, error) {
+	var (
+		config *rest.Config
+		err    error
+	)
+	cmdName := "cilium"
+	if len(os.Args[0]) != 0 {
+		cmdName = filepath.Base(os.Args[0])
+	}
+	userAgent := fmt.Sprintf("%s/%s", cmdName, version.Version)
+
+	switch {
+	// If both apiServerURL and kubeCfgPath are empty, try getting the
+	// rest.Config from the in-cluster configuration.
+	case apiServerURL == "" && kubeCfgPath == "":
+		if config, err = rest.InClusterConfig(); err != nil {
+			return nil, err
+		}
+	case kubeCfgPath != "":
+		if config, err = clientcmd.BuildConfigFromFlags("", kubeCfgPath); err != nil {
+			return nil, err
+		}
+	case strings.HasPrefix(apiServerURL, "https://"):
+		if config, err = rest.InClusterConfig(); err != nil {
+			return nil, err
+		}
+		config.Host = apiServerURL
+	default:
+		config = &rest.Config{Host: apiServerURL, UserAgent: userAgent}
+	}
+
+	setConfig(config, userAgent, qps, burst)
+	return config, nil
+}
+
+func setConfig(config *rest.Config, userAgent string, qps float32, burst int) {
+	if userAgent != "" {
+		config.UserAgent = userAgent
+	}
+	if qps != 0.0 {
+		config.QPS = qps
+	}
+	if burst != 0 {
+		config.Burst = burst
+	}
+}
+
+func (c *compositeClientset) waitForConn(ctx context.Context) error {
+	stop := make(chan struct{})
+	timeout := time.NewTimer(time.Minute)
+	defer timeout.Stop()
+	var err error
+	wait.Until(func() {
+		c.log.WithField("host", c.restConfig.Host).Info("Establishing connection to apiserver")
+		err = isConnReady(c)
+		if err == nil {
+			close(stop)
+			return
+		}
+
+		select {
+		case <-ctx.Done():
+		case <-timeout.C:
+		default:
+			return
+		}
+
+		c.log.WithError(err).WithField(logfields.IPAddr, c.restConfig.Host).Error("Unable to contact k8s api-server")
+		close(stop)
+	}, 5*time.Second, stop)
+	if err == nil {
+		c.log.Info("Connected to apiserver")
+	}
+	return err
+}
+
+func setDialer(cfg Config, restConfig *rest.Config) func() {
+	if cfg.K8sHeartbeatTimeout == 0 {
+		return func() {}
+	}
+	ctx := (&net.Dialer{
+		Timeout:   cfg.K8sHeartbeatTimeout,
+		KeepAlive: cfg.K8sHeartbeatTimeout,
+	}).DialContext
+	dialer := connrotation.NewDialer(ctx)
+	restConfig.Dial = dialer.DialContext
+	return dialer.CloseAll
+}
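+
+// setDialer hands back dialer.CloseAll so callers can force every tracked
+// connection closed; client-go's connrotation dialer then re-dials on the
+// next request. An isolated, illustrative sketch of that mechanism:
+//
+//	d := connrotation.NewDialer((&net.Dialer{Timeout: 5 * time.Second}).DialContext)
+//	cfg.Dial = d.DialContext // every connection is now tracked by d
+//	d.CloseAll()             // drop them all; subsequent requests reconnect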
+
+func runHeartbeat(log logrus.FieldLogger, heartBeat func(context.Context) error, timeout time.Duration, closeAllConns ...func()) {
+	expireDate := time.Now().Add(-timeout)
+	// Don't even perform a health check if we have received a successful
+	// k8s event in the last 'timeout' duration.
+	if k8smetrics.LastSuccessInteraction.Time().After(expireDate) {
+		return
+	}
+
+	done := make(chan error)
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+	go func() {
+		// Once we get to this point, a heartbeat to kube-apiserver is due.
+		// Close the connections on any error, with one exception: an
+		// http.StatusTooManyRequests response only means that the server is
+		// overloaded, so in that case alone the connections are kept open.
+		err := heartBeat(ctx)
+		switch t := err.(type) {
+		case *errors.StatusError:
+			if t.ErrStatus.Code != http.StatusTooManyRequests {
+				done <- err
+			}
+		default:
+			done <- err
+		}
+		close(done)
+	}()
+
+	select {
+	case err := <-done:
+		if err != nil {
+			log.WithError(err).Warn("Network status error received, restarting client connections")
+			for _, fn := range closeAllConns {
+				fn()
+			}
+		}
+	case <-ctx.Done():
+		log.Warn("Heartbeat timed out, restarting client connections")
+		for _, fn := range closeAllConns {
+			fn()
+		}
+	}
+}
+
+// isConnReady returns the error from a Get on the kube-system namespace.
+func isConnReady(c kubernetes.Interface) error {
+	_, err := c.CoreV1().Namespaces().Get(context.TODO(), "kube-system", metav1.GetOptions{})
+	return err
+}
+
+var FakeClientCell = cell.Provide(NewFakeClientset)
+
+type (
+	KubernetesFakeClientset = fake.Clientset
+	SlimFakeClientset       = slim_fake.Clientset
+	CiliumFakeClientset     = cilium_fake.Clientset
+	APIExtFakeClientset     = apiext_fake.Clientset
+)
+
+type FakeClientset struct {
+	disabled bool
+
+	*KubernetesFakeClientset
+	*CiliumFakeClientset
+	*APIExtFakeClientset
+	clientsetGetters
+
+	SlimFakeClientset *SlimFakeClientset
+
+	enabled bool
+}
+
+var _ Clientset = &FakeClientset{}
+
+func (c *FakeClientset) Slim() slim_clientset.Interface {
+	return c.SlimFakeClientset
+}
+
+func (c *FakeClientset) Discovery() discovery.DiscoveryInterface {
+	return c.KubernetesFakeClientset.Discovery()
+}
+
+func (c *FakeClientset) IsEnabled() bool {
+	return !c.disabled
+}
+
+func (c *FakeClientset) Disable() {
+	c.disabled = true
+}
+
+func (c *FakeClientset) Config() Config {
+	return Config{}
+}
+
+func (c *FakeClientset) RestConfig() *rest.Config {
+	return &rest.Config{}
+}
+
+func NewFakeClientset() (*FakeClientset, Clientset) {
+	client := FakeClientset{
+		SlimFakeClientset:       slim_fake.NewSimpleClientset(),
+		CiliumFakeClientset:     cilium_fake.NewSimpleClientset(),
+		APIExtFakeClientset:     apiext_fake.NewSimpleClientset(),
+		KubernetesFakeClientset: fake.NewSimpleClientset(),
+		enabled:                 true,
+	}
+	client.clientsetGetters = clientsetGetters{&client}
+	return &client, &client
+}
+
+// NewStandaloneClientset creates a clientset outside hive. To be removed once
+// remaining uses of k8s.Init()/k8s.Client()/etc. have been converted.
+func NewStandaloneClientset(cfg Config) (Clientset, error) {
+	log := logging.DefaultLogger
+	lc := &cell.DefaultLifecycle{}
+
+	clientset, err := newClientset(lc, log, cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := lc.Start(context.Background()); err != nil {
+		return nil, err
+	}
+
+	return clientset, err
+}
+
+func init() {
+	// Register the metav1.Table and metav1.PartialObjectMetadata for the
+	// apiextclientset.
+	utilruntime.Must(slim_metav1.AddMetaToScheme(slim_apiextclientsetscheme.Scheme))
+	utilruntime.Must(slim_metav1beta1.AddMetaToScheme(slim_apiextclientsetscheme.Scheme))
+}
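+
+// A brief illustrative sketch of using the fake clientset in a test. The
+// corev1 alias stands for k8s.io/api/core/v1 and is assumed here, not
+// imported by this file:
+//
+//	fc, cs := NewFakeClientset()
+//	node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}
+//	_ = fc.KubernetesFakeClientset.Tracker().Add(node)
+//	got, err := cs.CoreV1().Nodes().Get(context.TODO(), "node-1", metav1.GetOptions{})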
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/clientset_generated.go
new file mode 100644
index 0000000000..f39a5d25ec
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/clientset_generated.go
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+	clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
+	ciliumv2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2"
+	fakeciliumv2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake"
+	ciliumv2alpha1 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1"
+	fakeciliumv2alpha1 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/discovery"
+	fakediscovery "k8s.io/client-go/discovery/fake"
+	"k8s.io/client-go/testing"
+)
+
+// NewSimpleClientset returns a clientset that will respond with the provided objects.
+// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
+// without applying any validations and/or defaults. It shouldn't be considered a replacement
+// for a real clientset and is mostly useful in simple unit tests.
+func NewSimpleClientset(objects ...runtime.Object) *Clientset {
+	o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
+	for _, obj := range objects {
+		if err := o.Add(obj); err != nil {
+			panic(err)
+		}
+	}
+
+	cs := &Clientset{tracker: o}
+	cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
+	cs.AddReactor("*", "*", testing.ObjectReaction(o))
+	cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
+		gvr := action.GetResource()
+		ns := action.GetNamespace()
+		watch, err := o.Watch(gvr, ns)
+		if err != nil {
+			return false, nil, err
+		}
+		return true, watch, nil
+	})
+
+	return cs
+}
+
+// Clientset implements clientset.Interface. Meant to be embedded into a
+// struct to get a default implementation. This makes faking out just the method
+// you want to test easier.
+type Clientset struct {
+	testing.Fake
+	discovery *fakediscovery.FakeDiscovery
+	tracker   testing.ObjectTracker
+}
+
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+	return c.discovery
+}
+
+func (c *Clientset) Tracker() testing.ObjectTracker {
+	return c.tracker
+}
+
+var (
+	_ clientset.Interface = &Clientset{}
+	_ testing.FakeClient  = &Clientset{}
+)
+
+// CiliumV2 retrieves the CiliumV2Client
+func (c *Clientset) CiliumV2() ciliumv2.CiliumV2Interface {
+	return &fakeciliumv2.FakeCiliumV2{Fake: &c.Fake}
+}
+
+// CiliumV2alpha1 retrieves the CiliumV2alpha1Client
+func (c *Clientset) CiliumV2alpha1() ciliumv2alpha1.CiliumV2alpha1Interface {
+	return &fakeciliumv2alpha1.FakeCiliumV2alpha1{Fake: &c.Fake}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/doc.go
new file mode 100644
index 0000000000..1da8cc9d5b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated fake clientset.
+package fake diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..22b7a01cc5 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/register.go @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + ciliumv2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + ciliumv2.AddToScheme, + ciliumv2alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/doc.go new file mode 100644 index 0000000000..57bd090efb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_cilium.io_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_cilium.io_client.go new file mode 100644 index 0000000000..9ed059f648 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_cilium.io_client.go @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeCiliumV2 struct { + *testing.Fake +} + +func (c *FakeCiliumV2) CiliumClusterwideEnvoyConfigs() v2.CiliumClusterwideEnvoyConfigInterface { + return &FakeCiliumClusterwideEnvoyConfigs{c} +} + +func (c *FakeCiliumV2) CiliumClusterwideNetworkPolicies() v2.CiliumClusterwideNetworkPolicyInterface { + return &FakeCiliumClusterwideNetworkPolicies{c} +} + +func (c *FakeCiliumV2) CiliumEgressGatewayPolicies() v2.CiliumEgressGatewayPolicyInterface { + return &FakeCiliumEgressGatewayPolicies{c} +} + +func (c *FakeCiliumV2) CiliumEndpoints(namespace string) v2.CiliumEndpointInterface { + return &FakeCiliumEndpoints{c, namespace} +} + +func (c *FakeCiliumV2) CiliumEnvoyConfigs(namespace string) v2.CiliumEnvoyConfigInterface { + return &FakeCiliumEnvoyConfigs{c, namespace} +} + +func (c *FakeCiliumV2) CiliumExternalWorkloads() v2.CiliumExternalWorkloadInterface { + return &FakeCiliumExternalWorkloads{c} +} + +func (c *FakeCiliumV2) CiliumIdentities() v2.CiliumIdentityInterface { + return &FakeCiliumIdentities{c} +} + +func (c *FakeCiliumV2) CiliumLocalRedirectPolicies(namespace string) v2.CiliumLocalRedirectPolicyInterface { + return &FakeCiliumLocalRedirectPolicies{c, namespace} +} + +func (c *FakeCiliumV2) CiliumNetworkPolicies(namespace string) v2.CiliumNetworkPolicyInterface { + return &FakeCiliumNetworkPolicies{c, namespace} +} + +func (c *FakeCiliumV2) CiliumNodes() v2.CiliumNodeInterface { + return &FakeCiliumNodes{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCiliumV2) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwideenvoyconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwideenvoyconfig.go new file mode 100644 index 0000000000..ecabad97a3 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwideenvoyconfig.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumClusterwideEnvoyConfigs implements CiliumClusterwideEnvoyConfigInterface +type FakeCiliumClusterwideEnvoyConfigs struct { + Fake *FakeCiliumV2 +} + +var ciliumclusterwideenvoyconfigsResource = v2.SchemeGroupVersion.WithResource("ciliumclusterwideenvoyconfigs") + +var ciliumclusterwideenvoyconfigsKind = v2.SchemeGroupVersion.WithKind("CiliumClusterwideEnvoyConfig") + +// Get takes name of the ciliumClusterwideEnvoyConfig, and returns the corresponding ciliumClusterwideEnvoyConfig object, and an error if there is any. +func (c *FakeCiliumClusterwideEnvoyConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumClusterwideEnvoyConfig, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetAction(ciliumclusterwideenvoyconfigsResource, name), &v2.CiliumClusterwideEnvoyConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumClusterwideEnvoyConfig), err +} + +// List takes label and field selectors, and returns the list of CiliumClusterwideEnvoyConfigs that match those selectors. +func (c *FakeCiliumClusterwideEnvoyConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumClusterwideEnvoyConfigList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ciliumclusterwideenvoyconfigsResource, ciliumclusterwideenvoyconfigsKind, opts), &v2.CiliumClusterwideEnvoyConfigList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2.CiliumClusterwideEnvoyConfigList{ListMeta: obj.(*v2.CiliumClusterwideEnvoyConfigList).ListMeta} + for _, item := range obj.(*v2.CiliumClusterwideEnvoyConfigList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumClusterwideEnvoyConfigs. +func (c *FakeCiliumClusterwideEnvoyConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumclusterwideenvoyconfigsResource, opts)) +} + +// Create takes the representation of a ciliumClusterwideEnvoyConfig and creates it. Returns the server's representation of the ciliumClusterwideEnvoyConfig, and an error, if there is any. +func (c *FakeCiliumClusterwideEnvoyConfigs) Create(ctx context.Context, ciliumClusterwideEnvoyConfig *v2.CiliumClusterwideEnvoyConfig, opts v1.CreateOptions) (result *v2.CiliumClusterwideEnvoyConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumclusterwideenvoyconfigsResource, ciliumClusterwideEnvoyConfig), &v2.CiliumClusterwideEnvoyConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumClusterwideEnvoyConfig), err +} + +// Update takes the representation of a ciliumClusterwideEnvoyConfig and updates it. Returns the server's representation of the ciliumClusterwideEnvoyConfig, and an error, if there is any. +func (c *FakeCiliumClusterwideEnvoyConfigs) Update(ctx context.Context, ciliumClusterwideEnvoyConfig *v2.CiliumClusterwideEnvoyConfig, opts v1.UpdateOptions) (result *v2.CiliumClusterwideEnvoyConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumclusterwideenvoyconfigsResource, ciliumClusterwideEnvoyConfig), &v2.CiliumClusterwideEnvoyConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumClusterwideEnvoyConfig), err +} + +// Delete takes name of the ciliumClusterwideEnvoyConfig and deletes it. Returns an error if one occurs. +func (c *FakeCiliumClusterwideEnvoyConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumclusterwideenvoyconfigsResource, name, opts), &v2.CiliumClusterwideEnvoyConfig{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeCiliumClusterwideEnvoyConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumclusterwideenvoyconfigsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2.CiliumClusterwideEnvoyConfigList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumClusterwideEnvoyConfig. +func (c *FakeCiliumClusterwideEnvoyConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumClusterwideEnvoyConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ciliumclusterwideenvoyconfigsResource, name, pt, data, subresources...), &v2.CiliumClusterwideEnvoyConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumClusterwideEnvoyConfig), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwidenetworkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwidenetworkpolicy.go new file mode 100644 index 0000000000..d3d2be6243 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwidenetworkpolicy.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumClusterwideNetworkPolicies implements CiliumClusterwideNetworkPolicyInterface +type FakeCiliumClusterwideNetworkPolicies struct { + Fake *FakeCiliumV2 +} + +var ciliumclusterwidenetworkpoliciesResource = v2.SchemeGroupVersion.WithResource("ciliumclusterwidenetworkpolicies") + +var ciliumclusterwidenetworkpoliciesKind = v2.SchemeGroupVersion.WithKind("CiliumClusterwideNetworkPolicy") + +// Get takes name of the ciliumClusterwideNetworkPolicy, and returns the corresponding ciliumClusterwideNetworkPolicy object, and an error if there is any. +func (c *FakeCiliumClusterwideNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumclusterwidenetworkpoliciesResource, name), &v2.CiliumClusterwideNetworkPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumClusterwideNetworkPolicy), err +} + +// List takes label and field selectors, and returns the list of CiliumClusterwideNetworkPolicies that match those selectors. +func (c *FakeCiliumClusterwideNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumClusterwideNetworkPolicyList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(ciliumclusterwidenetworkpoliciesResource, ciliumclusterwidenetworkpoliciesKind, opts), &v2.CiliumClusterwideNetworkPolicyList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2.CiliumClusterwideNetworkPolicyList{ListMeta: obj.(*v2.CiliumClusterwideNetworkPolicyList).ListMeta} + for _, item := range obj.(*v2.CiliumClusterwideNetworkPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumClusterwideNetworkPolicies. +func (c *FakeCiliumClusterwideNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumclusterwidenetworkpoliciesResource, opts)) +} + +// Create takes the representation of a ciliumClusterwideNetworkPolicy and creates it. Returns the server's representation of the ciliumClusterwideNetworkPolicy, and an error, if there is any. +func (c *FakeCiliumClusterwideNetworkPolicies) Create(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.CreateOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumclusterwidenetworkpoliciesResource, ciliumClusterwideNetworkPolicy), &v2.CiliumClusterwideNetworkPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumClusterwideNetworkPolicy), err +} + +// Update takes the representation of a ciliumClusterwideNetworkPolicy and updates it. Returns the server's representation of the ciliumClusterwideNetworkPolicy, and an error, if there is any. +func (c *FakeCiliumClusterwideNetworkPolicies) Update(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumclusterwidenetworkpoliciesResource, ciliumClusterwideNetworkPolicy), &v2.CiliumClusterwideNetworkPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumClusterwideNetworkPolicy), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCiliumClusterwideNetworkPolicies) UpdateStatus(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (*v2.CiliumClusterwideNetworkPolicy, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(ciliumclusterwidenetworkpoliciesResource, "status", ciliumClusterwideNetworkPolicy), &v2.CiliumClusterwideNetworkPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumClusterwideNetworkPolicy), err +} + +// Delete takes name of the ciliumClusterwideNetworkPolicy and deletes it. Returns an error if one occurs. +func (c *FakeCiliumClusterwideNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumclusterwidenetworkpoliciesResource, name, opts), &v2.CiliumClusterwideNetworkPolicy{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeCiliumClusterwideNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumclusterwidenetworkpoliciesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2.CiliumClusterwideNetworkPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumClusterwideNetworkPolicy. +func (c *FakeCiliumClusterwideNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumClusterwideNetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ciliumclusterwidenetworkpoliciesResource, name, pt, data, subresources...), &v2.CiliumClusterwideNetworkPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumClusterwideNetworkPolicy), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumegressgatewaypolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumegressgatewaypolicy.go new file mode 100644 index 0000000000..993ab79e28 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumegressgatewaypolicy.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumEgressGatewayPolicies implements CiliumEgressGatewayPolicyInterface +type FakeCiliumEgressGatewayPolicies struct { + Fake *FakeCiliumV2 +} + +var ciliumegressgatewaypoliciesResource = v2.SchemeGroupVersion.WithResource("ciliumegressgatewaypolicies") + +var ciliumegressgatewaypoliciesKind = v2.SchemeGroupVersion.WithKind("CiliumEgressGatewayPolicy") + +// Get takes name of the ciliumEgressGatewayPolicy, and returns the corresponding ciliumEgressGatewayPolicy object, and an error if there is any. +func (c *FakeCiliumEgressGatewayPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumEgressGatewayPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumegressgatewaypoliciesResource, name), &v2.CiliumEgressGatewayPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumEgressGatewayPolicy), err +} + +// List takes label and field selectors, and returns the list of CiliumEgressGatewayPolicies that match those selectors. +func (c *FakeCiliumEgressGatewayPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumEgressGatewayPolicyList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(ciliumegressgatewaypoliciesResource, ciliumegressgatewaypoliciesKind, opts), &v2.CiliumEgressGatewayPolicyList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2.CiliumEgressGatewayPolicyList{ListMeta: obj.(*v2.CiliumEgressGatewayPolicyList).ListMeta} + for _, item := range obj.(*v2.CiliumEgressGatewayPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumEgressGatewayPolicies. +func (c *FakeCiliumEgressGatewayPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumegressgatewaypoliciesResource, opts)) +} + +// Create takes the representation of a ciliumEgressGatewayPolicy and creates it. Returns the server's representation of the ciliumEgressGatewayPolicy, and an error, if there is any. +func (c *FakeCiliumEgressGatewayPolicies) Create(ctx context.Context, ciliumEgressGatewayPolicy *v2.CiliumEgressGatewayPolicy, opts v1.CreateOptions) (result *v2.CiliumEgressGatewayPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumegressgatewaypoliciesResource, ciliumEgressGatewayPolicy), &v2.CiliumEgressGatewayPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumEgressGatewayPolicy), err +} + +// Update takes the representation of a ciliumEgressGatewayPolicy and updates it. Returns the server's representation of the ciliumEgressGatewayPolicy, and an error, if there is any. +func (c *FakeCiliumEgressGatewayPolicies) Update(ctx context.Context, ciliumEgressGatewayPolicy *v2.CiliumEgressGatewayPolicy, opts v1.UpdateOptions) (result *v2.CiliumEgressGatewayPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumegressgatewaypoliciesResource, ciliumEgressGatewayPolicy), &v2.CiliumEgressGatewayPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumEgressGatewayPolicy), err +} + +// Delete takes name of the ciliumEgressGatewayPolicy and deletes it. Returns an error if one occurs. +func (c *FakeCiliumEgressGatewayPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumegressgatewaypoliciesResource, name, opts), &v2.CiliumEgressGatewayPolicy{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumEgressGatewayPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumegressgatewaypoliciesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2.CiliumEgressGatewayPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumEgressGatewayPolicy. +func (c *FakeCiliumEgressGatewayPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEgressGatewayPolicy, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumegressgatewaypoliciesResource, name, pt, data, subresources...), &v2.CiliumEgressGatewayPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumEgressGatewayPolicy), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumendpoint.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumendpoint.go new file mode 100644 index 0000000000..5ec15c48b0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumendpoint.go @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumEndpoints implements CiliumEndpointInterface +type FakeCiliumEndpoints struct { + Fake *FakeCiliumV2 + ns string +} + +var ciliumendpointsResource = v2.SchemeGroupVersion.WithResource("ciliumendpoints") + +var ciliumendpointsKind = v2.SchemeGroupVersion.WithKind("CiliumEndpoint") + +// Get takes name of the ciliumEndpoint, and returns the corresponding ciliumEndpoint object, and an error if there is any. +func (c *FakeCiliumEndpoints) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumEndpoint, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(ciliumendpointsResource, c.ns, name), &v2.CiliumEndpoint{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumEndpoint), err +} + +// List takes label and field selectors, and returns the list of CiliumEndpoints that match those selectors. +func (c *FakeCiliumEndpoints) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumEndpointList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(ciliumendpointsResource, ciliumendpointsKind, c.ns, opts), &v2.CiliumEndpointList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2.CiliumEndpointList{ListMeta: obj.(*v2.CiliumEndpointList).ListMeta} + for _, item := range obj.(*v2.CiliumEndpointList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumEndpoints. +func (c *FakeCiliumEndpoints) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(ciliumendpointsResource, c.ns, opts)) + +} + +// Create takes the representation of a ciliumEndpoint and creates it. Returns the server's representation of the ciliumEndpoint, and an error, if there is any. +func (c *FakeCiliumEndpoints) Create(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.CreateOptions) (result *v2.CiliumEndpoint, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(ciliumendpointsResource, c.ns, ciliumEndpoint), &v2.CiliumEndpoint{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumEndpoint), err +} + +// Update takes the representation of a ciliumEndpoint and updates it. Returns the server's representation of the ciliumEndpoint, and an error, if there is any. +func (c *FakeCiliumEndpoints) Update(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.UpdateOptions) (result *v2.CiliumEndpoint, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(ciliumendpointsResource, c.ns, ciliumEndpoint), &v2.CiliumEndpoint{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumEndpoint), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCiliumEndpoints) UpdateStatus(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.UpdateOptions) (*v2.CiliumEndpoint, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(ciliumendpointsResource, "status", c.ns, ciliumEndpoint), &v2.CiliumEndpoint{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumEndpoint), err +} + +// Delete takes name of the ciliumEndpoint and deletes it. Returns an error if one occurs. +func (c *FakeCiliumEndpoints) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(ciliumendpointsResource, c.ns, name, opts), &v2.CiliumEndpoint{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumEndpoints) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(ciliumendpointsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v2.CiliumEndpointList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumEndpoint. +func (c *FakeCiliumEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEndpoint, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(ciliumendpointsResource, c.ns, name, pt, data, subresources...), &v2.CiliumEndpoint{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumEndpoint), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumenvoyconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumenvoyconfig.go new file mode 100644 index 0000000000..fa05926956 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumenvoyconfig.go @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumEnvoyConfigs implements CiliumEnvoyConfigInterface +type FakeCiliumEnvoyConfigs struct { + Fake *FakeCiliumV2 + ns string +} + +var ciliumenvoyconfigsResource = v2.SchemeGroupVersion.WithResource("ciliumenvoyconfigs") + +var ciliumenvoyconfigsKind = v2.SchemeGroupVersion.WithKind("CiliumEnvoyConfig") + +// Get takes name of the ciliumEnvoyConfig, and returns the corresponding ciliumEnvoyConfig object, and an error if there is any. +func (c *FakeCiliumEnvoyConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumEnvoyConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(ciliumenvoyconfigsResource, c.ns, name), &v2.CiliumEnvoyConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumEnvoyConfig), err +} + +// List takes label and field selectors, and returns the list of CiliumEnvoyConfigs that match those selectors. +func (c *FakeCiliumEnvoyConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumEnvoyConfigList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(ciliumenvoyconfigsResource, ciliumenvoyconfigsKind, c.ns, opts), &v2.CiliumEnvoyConfigList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2.CiliumEnvoyConfigList{ListMeta: obj.(*v2.CiliumEnvoyConfigList).ListMeta} + for _, item := range obj.(*v2.CiliumEnvoyConfigList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumEnvoyConfigs. +func (c *FakeCiliumEnvoyConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(ciliumenvoyconfigsResource, c.ns, opts)) + +} + +// Create takes the representation of a ciliumEnvoyConfig and creates it. Returns the server's representation of the ciliumEnvoyConfig, and an error, if there is any. +func (c *FakeCiliumEnvoyConfigs) Create(ctx context.Context, ciliumEnvoyConfig *v2.CiliumEnvoyConfig, opts v1.CreateOptions) (result *v2.CiliumEnvoyConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(ciliumenvoyconfigsResource, c.ns, ciliumEnvoyConfig), &v2.CiliumEnvoyConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumEnvoyConfig), err +} + +// Update takes the representation of a ciliumEnvoyConfig and updates it. Returns the server's representation of the ciliumEnvoyConfig, and an error, if there is any. +func (c *FakeCiliumEnvoyConfigs) Update(ctx context.Context, ciliumEnvoyConfig *v2.CiliumEnvoyConfig, opts v1.UpdateOptions) (result *v2.CiliumEnvoyConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(ciliumenvoyconfigsResource, c.ns, ciliumEnvoyConfig), &v2.CiliumEnvoyConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumEnvoyConfig), err +} + +// Delete takes name of the ciliumEnvoyConfig and deletes it. Returns an error if one occurs. 
+func (c *FakeCiliumEnvoyConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(ciliumenvoyconfigsResource, c.ns, name, opts), &v2.CiliumEnvoyConfig{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumEnvoyConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(ciliumenvoyconfigsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v2.CiliumEnvoyConfigList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumEnvoyConfig. +func (c *FakeCiliumEnvoyConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEnvoyConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(ciliumenvoyconfigsResource, c.ns, name, pt, data, subresources...), &v2.CiliumEnvoyConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumEnvoyConfig), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumexternalworkload.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumexternalworkload.go new file mode 100644 index 0000000000..e947be5f1c --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumexternalworkload.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumExternalWorkloads implements CiliumExternalWorkloadInterface +type FakeCiliumExternalWorkloads struct { + Fake *FakeCiliumV2 +} + +var ciliumexternalworkloadsResource = v2.SchemeGroupVersion.WithResource("ciliumexternalworkloads") + +var ciliumexternalworkloadsKind = v2.SchemeGroupVersion.WithKind("CiliumExternalWorkload") + +// Get takes name of the ciliumExternalWorkload, and returns the corresponding ciliumExternalWorkload object, and an error if there is any. +func (c *FakeCiliumExternalWorkloads) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumExternalWorkload, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumexternalworkloadsResource, name), &v2.CiliumExternalWorkload{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumExternalWorkload), err +} + +// List takes label and field selectors, and returns the list of CiliumExternalWorkloads that match those selectors. +func (c *FakeCiliumExternalWorkloads) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumExternalWorkloadList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(ciliumexternalworkloadsResource, ciliumexternalworkloadsKind, opts), &v2.CiliumExternalWorkloadList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2.CiliumExternalWorkloadList{ListMeta: obj.(*v2.CiliumExternalWorkloadList).ListMeta} + for _, item := range obj.(*v2.CiliumExternalWorkloadList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumExternalWorkloads. +func (c *FakeCiliumExternalWorkloads) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumexternalworkloadsResource, opts)) +} + +// Create takes the representation of a ciliumExternalWorkload and creates it. Returns the server's representation of the ciliumExternalWorkload, and an error, if there is any. +func (c *FakeCiliumExternalWorkloads) Create(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.CreateOptions) (result *v2.CiliumExternalWorkload, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumexternalworkloadsResource, ciliumExternalWorkload), &v2.CiliumExternalWorkload{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumExternalWorkload), err +} + +// Update takes the representation of a ciliumExternalWorkload and updates it. Returns the server's representation of the ciliumExternalWorkload, and an error, if there is any. +func (c *FakeCiliumExternalWorkloads) Update(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.UpdateOptions) (result *v2.CiliumExternalWorkload, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumexternalworkloadsResource, ciliumExternalWorkload), &v2.CiliumExternalWorkload{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumExternalWorkload), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCiliumExternalWorkloads) UpdateStatus(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.UpdateOptions) (*v2.CiliumExternalWorkload, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(ciliumexternalworkloadsResource, "status", ciliumExternalWorkload), &v2.CiliumExternalWorkload{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumExternalWorkload), err +} + +// Delete takes name of the ciliumExternalWorkload and deletes it. Returns an error if one occurs. +func (c *FakeCiliumExternalWorkloads) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumexternalworkloadsResource, name, opts), &v2.CiliumExternalWorkload{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumExternalWorkloads) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumexternalworkloadsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2.CiliumExternalWorkloadList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumExternalWorkload. 
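Reviewer note: the generated List above filters only on the label selector; the `label, _, _ := testing.ExtractFromListOptions(opts)` line discards any field selector, so fake-backed tests that rely on field selectors will silently match every object. A sketch of label-based filtering against the cluster-scoped CiliumExternalWorkload fake, under the same NewSimpleClientset assumption as the earlier example (object names are illustrative):

package fake_test

import (
	"context"
	"testing"

	v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
	"github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestFakeExternalWorkloadListByLabel(t *testing.T) {
	cs := fake.NewSimpleClientset(
		&v2.CiliumExternalWorkload{ObjectMeta: metav1.ObjectMeta{
			Name: "vm-1", Labels: map[string]string{"env": "prod"},
		}},
		&v2.CiliumExternalWorkload{ObjectMeta: metav1.ObjectMeta{
			Name: "vm-2", Labels: map[string]string{"env": "dev"},
		}},
	)

	// CiliumExternalWorkload is cluster-scoped, so no namespace argument.
	list, err := cs.CiliumV2().CiliumExternalWorkloads().List(
		context.TODO(), metav1.ListOptions{LabelSelector: "env=prod"})
	if err != nil {
		t.Fatal(err)
	}
	// Only the label selector is honoured; a FieldSelector in ListOptions
	// would be dropped by the generated List shown above.
	if len(list.Items) != 1 || list.Items[0].Name != "vm-1" {
		t.Fatalf("unexpected list: %+v", list.Items)
	}
}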
+func (c *FakeCiliumExternalWorkloads) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumExternalWorkload, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ciliumexternalworkloadsResource, name, pt, data, subresources...), &v2.CiliumExternalWorkload{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumExternalWorkload), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumidentity.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumidentity.go new file mode 100644 index 0000000000..745193c7d3 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumidentity.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumIdentities implements CiliumIdentityInterface +type FakeCiliumIdentities struct { + Fake *FakeCiliumV2 +} + +var ciliumidentitiesResource = v2.SchemeGroupVersion.WithResource("ciliumidentities") + +var ciliumidentitiesKind = v2.SchemeGroupVersion.WithKind("CiliumIdentity") + +// Get takes name of the ciliumIdentity, and returns the corresponding ciliumIdentity object, and an error if there is any. +func (c *FakeCiliumIdentities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumIdentity, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumidentitiesResource, name), &v2.CiliumIdentity{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumIdentity), err +} + +// List takes label and field selectors, and returns the list of CiliumIdentities that match those selectors. +func (c *FakeCiliumIdentities) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumIdentityList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ciliumidentitiesResource, ciliumidentitiesKind, opts), &v2.CiliumIdentityList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2.CiliumIdentityList{ListMeta: obj.(*v2.CiliumIdentityList).ListMeta} + for _, item := range obj.(*v2.CiliumIdentityList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumIdentities. +func (c *FakeCiliumIdentities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumidentitiesResource, opts)) +} + +// Create takes the representation of a ciliumIdentity and creates it. Returns the server's representation of the ciliumIdentity, and an error, if there is any. +func (c *FakeCiliumIdentities) Create(ctx context.Context, ciliumIdentity *v2.CiliumIdentity, opts v1.CreateOptions) (result *v2.CiliumIdentity, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateAction(ciliumidentitiesResource, ciliumIdentity), &v2.CiliumIdentity{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumIdentity), err +} + +// Update takes the representation of a ciliumIdentity and updates it. Returns the server's representation of the ciliumIdentity, and an error, if there is any. +func (c *FakeCiliumIdentities) Update(ctx context.Context, ciliumIdentity *v2.CiliumIdentity, opts v1.UpdateOptions) (result *v2.CiliumIdentity, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumidentitiesResource, ciliumIdentity), &v2.CiliumIdentity{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumIdentity), err +} + +// Delete takes name of the ciliumIdentity and deletes it. Returns an error if one occurs. +func (c *FakeCiliumIdentities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumidentitiesResource, name, opts), &v2.CiliumIdentity{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumIdentities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumidentitiesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2.CiliumIdentityList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumIdentity. +func (c *FakeCiliumIdentities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumIdentity, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ciliumidentitiesResource, name, pt, data, subresources...), &v2.CiliumIdentity{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumIdentity), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumlocalredirectpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumlocalredirectpolicy.go new file mode 100644 index 0000000000..308309ee12 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumlocalredirectpolicy.go @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumLocalRedirectPolicies implements CiliumLocalRedirectPolicyInterface +type FakeCiliumLocalRedirectPolicies struct { + Fake *FakeCiliumV2 + ns string +} + +var ciliumlocalredirectpoliciesResource = v2.SchemeGroupVersion.WithResource("ciliumlocalredirectpolicies") + +var ciliumlocalredirectpoliciesKind = v2.SchemeGroupVersion.WithKind("CiliumLocalRedirectPolicy") + +// Get takes name of the ciliumLocalRedirectPolicy, and returns the corresponding ciliumLocalRedirectPolicy object, and an error if there is any. +func (c *FakeCiliumLocalRedirectPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumLocalRedirectPolicy, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(ciliumlocalredirectpoliciesResource, c.ns, name), &v2.CiliumLocalRedirectPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumLocalRedirectPolicy), err +} + +// List takes label and field selectors, and returns the list of CiliumLocalRedirectPolicies that match those selectors. +func (c *FakeCiliumLocalRedirectPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumLocalRedirectPolicyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(ciliumlocalredirectpoliciesResource, ciliumlocalredirectpoliciesKind, c.ns, opts), &v2.CiliumLocalRedirectPolicyList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2.CiliumLocalRedirectPolicyList{ListMeta: obj.(*v2.CiliumLocalRedirectPolicyList).ListMeta} + for _, item := range obj.(*v2.CiliumLocalRedirectPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumLocalRedirectPolicies. +func (c *FakeCiliumLocalRedirectPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(ciliumlocalredirectpoliciesResource, c.ns, opts)) + +} + +// Create takes the representation of a ciliumLocalRedirectPolicy and creates it. Returns the server's representation of the ciliumLocalRedirectPolicy, and an error, if there is any. +func (c *FakeCiliumLocalRedirectPolicies) Create(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.CreateOptions) (result *v2.CiliumLocalRedirectPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(ciliumlocalredirectpoliciesResource, c.ns, ciliumLocalRedirectPolicy), &v2.CiliumLocalRedirectPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumLocalRedirectPolicy), err +} + +// Update takes the representation of a ciliumLocalRedirectPolicy and updates it. Returns the server's representation of the ciliumLocalRedirectPolicy, and an error, if there is any. +func (c *FakeCiliumLocalRedirectPolicies) Update(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (result *v2.CiliumLocalRedirectPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(ciliumlocalredirectpoliciesResource, c.ns, ciliumLocalRedirectPolicy), &v2.CiliumLocalRedirectPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumLocalRedirectPolicy), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCiliumLocalRedirectPolicies) UpdateStatus(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (*v2.CiliumLocalRedirectPolicy, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(ciliumlocalredirectpoliciesResource, "status", c.ns, ciliumLocalRedirectPolicy), &v2.CiliumLocalRedirectPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumLocalRedirectPolicy), err +} + +// Delete takes name of the ciliumLocalRedirectPolicy and deletes it. Returns an error if one occurs. 
+func (c *FakeCiliumLocalRedirectPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(ciliumlocalredirectpoliciesResource, c.ns, name, opts), &v2.CiliumLocalRedirectPolicy{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumLocalRedirectPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(ciliumlocalredirectpoliciesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v2.CiliumLocalRedirectPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumLocalRedirectPolicy. +func (c *FakeCiliumLocalRedirectPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumLocalRedirectPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(ciliumlocalredirectpoliciesResource, c.ns, name, pt, data, subresources...), &v2.CiliumLocalRedirectPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumLocalRedirectPolicy), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnetworkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnetworkpolicy.go new file mode 100644 index 0000000000..b40b9740e0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnetworkpolicy.go @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumNetworkPolicies implements CiliumNetworkPolicyInterface +type FakeCiliumNetworkPolicies struct { + Fake *FakeCiliumV2 + ns string +} + +var ciliumnetworkpoliciesResource = v2.SchemeGroupVersion.WithResource("ciliumnetworkpolicies") + +var ciliumnetworkpoliciesKind = v2.SchemeGroupVersion.WithKind("CiliumNetworkPolicy") + +// Get takes name of the ciliumNetworkPolicy, and returns the corresponding ciliumNetworkPolicy object, and an error if there is any. +func (c *FakeCiliumNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumNetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(ciliumnetworkpoliciesResource, c.ns, name), &v2.CiliumNetworkPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumNetworkPolicy), err +} + +// List takes label and field selectors, and returns the list of CiliumNetworkPolicies that match those selectors. +func (c *FakeCiliumNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumNetworkPolicyList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(ciliumnetworkpoliciesResource, ciliumnetworkpoliciesKind, c.ns, opts), &v2.CiliumNetworkPolicyList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2.CiliumNetworkPolicyList{ListMeta: obj.(*v2.CiliumNetworkPolicyList).ListMeta} + for _, item := range obj.(*v2.CiliumNetworkPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumNetworkPolicies. +func (c *FakeCiliumNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(ciliumnetworkpoliciesResource, c.ns, opts)) + +} + +// Create takes the representation of a ciliumNetworkPolicy and creates it. Returns the server's representation of the ciliumNetworkPolicy, and an error, if there is any. +func (c *FakeCiliumNetworkPolicies) Create(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.CreateOptions) (result *v2.CiliumNetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(ciliumnetworkpoliciesResource, c.ns, ciliumNetworkPolicy), &v2.CiliumNetworkPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumNetworkPolicy), err +} + +// Update takes the representation of a ciliumNetworkPolicy and updates it. Returns the server's representation of the ciliumNetworkPolicy, and an error, if there is any. +func (c *FakeCiliumNetworkPolicies) Update(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.UpdateOptions) (result *v2.CiliumNetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(ciliumnetworkpoliciesResource, c.ns, ciliumNetworkPolicy), &v2.CiliumNetworkPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumNetworkPolicy), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCiliumNetworkPolicies) UpdateStatus(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.UpdateOptions) (*v2.CiliumNetworkPolicy, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(ciliumnetworkpoliciesResource, "status", c.ns, ciliumNetworkPolicy), &v2.CiliumNetworkPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumNetworkPolicy), err +} + +// Delete takes name of the ciliumNetworkPolicy and deletes it. Returns an error if one occurs. +func (c *FakeCiliumNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(ciliumnetworkpoliciesResource, c.ns, name, opts), &v2.CiliumNetworkPolicy{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(ciliumnetworkpoliciesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v2.CiliumNetworkPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumNetworkPolicy. 
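Reviewer note: UpdateStatus above is routed through NewUpdateSubresourceAction with the "status" subresource, and the embedded testing.Fake records that action, which tests can assert on. A small sketch under the same assumptions and import paths as the earlier examples; the policy name is illustrative:

package fake_test

import (
	"context"
	"testing"

	v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
	"github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestFakeCNPUpdateStatus(t *testing.T) {
	cnp := &v2.CiliumNetworkPolicy{ObjectMeta: metav1.ObjectMeta{
		Name: "allow-dns", Namespace: "kube-system",
	}}
	cs := fake.NewSimpleClientset(cnp)

	// UpdateStatus persists the object through the tracker, just like a
	// regular Update, but tagged with the "status" subresource.
	if _, err := cs.CiliumV2().CiliumNetworkPolicies("kube-system").
		UpdateStatus(context.TODO(), cnp.DeepCopy(), metav1.UpdateOptions{}); err != nil {
		t.Fatal(err)
	}

	// Seeding via NewSimpleClientset records no actions, so the only
	// recorded action is the status update above.
	acts := cs.Actions()
	last := acts[len(acts)-1]
	if last.GetSubresource() != "status" {
		t.Fatalf("expected status subresource, got %q", last.GetSubresource())
	}
}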
+func (c *FakeCiliumNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumNetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(ciliumnetworkpoliciesResource, c.ns, name, pt, data, subresources...), &v2.CiliumNetworkPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumNetworkPolicy), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnode.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnode.go new file mode 100644 index 0000000000..73e91b1f95 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnode.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumNodes implements CiliumNodeInterface +type FakeCiliumNodes struct { + Fake *FakeCiliumV2 +} + +var ciliumnodesResource = v2.SchemeGroupVersion.WithResource("ciliumnodes") + +var ciliumnodesKind = v2.SchemeGroupVersion.WithKind("CiliumNode") + +// Get takes name of the ciliumNode, and returns the corresponding ciliumNode object, and an error if there is any. +func (c *FakeCiliumNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumNode, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumnodesResource, name), &v2.CiliumNode{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumNode), err +} + +// List takes label and field selectors, and returns the list of CiliumNodes that match those selectors. +func (c *FakeCiliumNodes) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumNodeList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ciliumnodesResource, ciliumnodesKind, opts), &v2.CiliumNodeList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2.CiliumNodeList{ListMeta: obj.(*v2.CiliumNodeList).ListMeta} + for _, item := range obj.(*v2.CiliumNodeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumNodes. +func (c *FakeCiliumNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumnodesResource, opts)) +} + +// Create takes the representation of a ciliumNode and creates it. Returns the server's representation of the ciliumNode, and an error, if there is any. +func (c *FakeCiliumNodes) Create(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.CreateOptions) (result *v2.CiliumNode, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateAction(ciliumnodesResource, ciliumNode), &v2.CiliumNode{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumNode), err +} + +// Update takes the representation of a ciliumNode and updates it. Returns the server's representation of the ciliumNode, and an error, if there is any. +func (c *FakeCiliumNodes) Update(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.UpdateOptions) (result *v2.CiliumNode, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumnodesResource, ciliumNode), &v2.CiliumNode{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumNode), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCiliumNodes) UpdateStatus(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.UpdateOptions) (*v2.CiliumNode, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(ciliumnodesResource, "status", ciliumNode), &v2.CiliumNode{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumNode), err +} + +// Delete takes name of the ciliumNode and deletes it. Returns an error if one occurs. +func (c *FakeCiliumNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumnodesResource, name, opts), &v2.CiliumNode{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumnodesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2.CiliumNodeList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumNode. +func (c *FakeCiliumNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumNode, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ciliumnodesResource, name, pt, data, subresources...), &v2.CiliumNode{}) + if obj == nil { + return nil, err + } + return obj.(*v2.CiliumNode), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/doc.go new file mode 100644 index 0000000000..57bd090efb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_cilium.io_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_cilium.io_client.go new file mode 100644 index 0000000000..a1c62d0ecd --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_cilium.io_client.go @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. 
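Reviewer note: FakeCiliumNodes above uses the NewRoot* action constructors because CiliumNode is cluster-scoped, so the typed accessor takes no namespace, unlike the namespaced fakes earlier in this diff. A brief sketch with the same assumed constructor and import paths ("worker-1" is illustrative):

package fake_test

import (
	"context"
	"testing"

	v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
	"github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestFakeCiliumNodeCreate(t *testing.T) {
	cs := fake.NewSimpleClientset()

	// Cluster-scoped: NewRootCreateAction carries no namespace, and the
	// accessor is CiliumNodes() rather than CiliumNodes(namespace).
	node := &v2.CiliumNode{ObjectMeta: metav1.ObjectMeta{Name: "worker-1"}}
	if _, err := cs.CiliumV2().CiliumNodes().Create(
		context.TODO(), node, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	got, err := cs.CiliumV2().CiliumNodes().Get(
		context.TODO(), "worker-1", metav1.GetOptions{})
	if err != nil || got.Name != "worker-1" {
		t.Fatalf("got %v, err %v", got, err)
	}
}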
+ +package fake + +import ( + v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeCiliumV2alpha1 struct { + *testing.Fake +} + +func (c *FakeCiliumV2alpha1) CiliumBGPAdvertisements() v2alpha1.CiliumBGPAdvertisementInterface { + return &FakeCiliumBGPAdvertisements{c} +} + +func (c *FakeCiliumV2alpha1) CiliumBGPClusterConfigs() v2alpha1.CiliumBGPClusterConfigInterface { + return &FakeCiliumBGPClusterConfigs{c} +} + +func (c *FakeCiliumV2alpha1) CiliumBGPNodeConfigs() v2alpha1.CiliumBGPNodeConfigInterface { + return &FakeCiliumBGPNodeConfigs{c} +} + +func (c *FakeCiliumV2alpha1) CiliumBGPNodeConfigOverrides() v2alpha1.CiliumBGPNodeConfigOverrideInterface { + return &FakeCiliumBGPNodeConfigOverrides{c} +} + +func (c *FakeCiliumV2alpha1) CiliumBGPPeerConfigs() v2alpha1.CiliumBGPPeerConfigInterface { + return &FakeCiliumBGPPeerConfigs{c} +} + +func (c *FakeCiliumV2alpha1) CiliumBGPPeeringPolicies() v2alpha1.CiliumBGPPeeringPolicyInterface { + return &FakeCiliumBGPPeeringPolicies{c} +} + +func (c *FakeCiliumV2alpha1) CiliumCIDRGroups() v2alpha1.CiliumCIDRGroupInterface { + return &FakeCiliumCIDRGroups{c} +} + +func (c *FakeCiliumV2alpha1) CiliumEndpointSlices() v2alpha1.CiliumEndpointSliceInterface { + return &FakeCiliumEndpointSlices{c} +} + +func (c *FakeCiliumV2alpha1) CiliumL2AnnouncementPolicies() v2alpha1.CiliumL2AnnouncementPolicyInterface { + return &FakeCiliumL2AnnouncementPolicies{c} +} + +func (c *FakeCiliumV2alpha1) CiliumLoadBalancerIPPools() v2alpha1.CiliumLoadBalancerIPPoolInterface { + return &FakeCiliumLoadBalancerIPPools{c} +} + +func (c *FakeCiliumV2alpha1) CiliumNodeConfigs(namespace string) v2alpha1.CiliumNodeConfigInterface { + return &FakeCiliumNodeConfigs{c, namespace} +} + +func (c *FakeCiliumV2alpha1) CiliumPodIPPools() v2alpha1.CiliumPodIPPoolInterface { + return &FakeCiliumPodIPPools{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCiliumV2alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpadvertisement.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpadvertisement.go new file mode 100644 index 0000000000..1afca17801 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpadvertisement.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. 
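Reviewer note: since RESTClient() on these fakes returns a nil *rest.RESTClient, no call can escape the action pipeline; tests inject failures with reactors on the embedded testing.Fake instead. A sketch using PrependReactor, where the verb and resource strings mirror the lower-case plural forms passed to WithResource above (the error and object name are illustrative):

package fake_test

import (
	"context"
	"errors"
	"testing"

	v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
	"github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	k8stesting "k8s.io/client-go/testing"
)

func TestInjectedError(t *testing.T) {
	cs := fake.NewSimpleClientset()

	// PrependReactor intercepts matching actions before the default
	// tracker-backed reactor gets a chance to handle them.
	boom := errors.New("boom")
	cs.PrependReactor("create", "ciliumbgpadvertisements",
		func(action k8stesting.Action) (bool, runtime.Object, error) {
			return true, nil, boom
		})

	_, err := cs.CiliumV2alpha1().CiliumBGPAdvertisements().Create(
		context.TODO(),
		&v2alpha1.CiliumBGPAdvertisement{ObjectMeta: metav1.ObjectMeta{Name: "adv"}},
		metav1.CreateOptions{})
	if !errors.Is(err, boom) {
		t.Fatalf("expected injected error, got %v", err)
	}
}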
+ +package fake + +import ( + "context" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumBGPAdvertisements implements CiliumBGPAdvertisementInterface +type FakeCiliumBGPAdvertisements struct { + Fake *FakeCiliumV2alpha1 +} + +var ciliumbgpadvertisementsResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumbgpadvertisements") + +var ciliumbgpadvertisementsKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumBGPAdvertisement") + +// Get takes name of the ciliumBGPAdvertisement, and returns the corresponding ciliumBGPAdvertisement object, and an error if there is any. +func (c *FakeCiliumBGPAdvertisements) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPAdvertisement, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumbgpadvertisementsResource, name), &v2alpha1.CiliumBGPAdvertisement{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPAdvertisement), err +} + +// List takes label and field selectors, and returns the list of CiliumBGPAdvertisements that match those selectors. +func (c *FakeCiliumBGPAdvertisements) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPAdvertisementList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ciliumbgpadvertisementsResource, ciliumbgpadvertisementsKind, opts), &v2alpha1.CiliumBGPAdvertisementList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.CiliumBGPAdvertisementList{ListMeta: obj.(*v2alpha1.CiliumBGPAdvertisementList).ListMeta} + for _, item := range obj.(*v2alpha1.CiliumBGPAdvertisementList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumBGPAdvertisements. +func (c *FakeCiliumBGPAdvertisements) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumbgpadvertisementsResource, opts)) +} + +// Create takes the representation of a ciliumBGPAdvertisement and creates it. Returns the server's representation of the ciliumBGPAdvertisement, and an error, if there is any. +func (c *FakeCiliumBGPAdvertisements) Create(ctx context.Context, ciliumBGPAdvertisement *v2alpha1.CiliumBGPAdvertisement, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPAdvertisement, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumbgpadvertisementsResource, ciliumBGPAdvertisement), &v2alpha1.CiliumBGPAdvertisement{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPAdvertisement), err +} + +// Update takes the representation of a ciliumBGPAdvertisement and updates it. Returns the server's representation of the ciliumBGPAdvertisement, and an error, if there is any. +func (c *FakeCiliumBGPAdvertisements) Update(ctx context.Context, ciliumBGPAdvertisement *v2alpha1.CiliumBGPAdvertisement, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPAdvertisement, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateAction(ciliumbgpadvertisementsResource, ciliumBGPAdvertisement), &v2alpha1.CiliumBGPAdvertisement{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPAdvertisement), err +} + +// Delete takes name of the ciliumBGPAdvertisement and deletes it. Returns an error if one occurs. +func (c *FakeCiliumBGPAdvertisements) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumbgpadvertisementsResource, name, opts), &v2alpha1.CiliumBGPAdvertisement{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumBGPAdvertisements) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumbgpadvertisementsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2alpha1.CiliumBGPAdvertisementList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumBGPAdvertisement. +func (c *FakeCiliumBGPAdvertisements) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPAdvertisement, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ciliumbgpadvertisementsResource, name, pt, data, subresources...), &v2alpha1.CiliumBGPAdvertisement{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPAdvertisement), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpclusterconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpclusterconfig.go new file mode 100644 index 0000000000..f01cc1930c --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpclusterconfig.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumBGPClusterConfigs implements CiliumBGPClusterConfigInterface +type FakeCiliumBGPClusterConfigs struct { + Fake *FakeCiliumV2alpha1 +} + +var ciliumbgpclusterconfigsResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumbgpclusterconfigs") + +var ciliumbgpclusterconfigsKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumBGPClusterConfig") + +// Get takes name of the ciliumBGPClusterConfig, and returns the corresponding ciliumBGPClusterConfig object, and an error if there is any. +func (c *FakeCiliumBGPClusterConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPClusterConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumbgpclusterconfigsResource, name), &v2alpha1.CiliumBGPClusterConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPClusterConfig), err +} + +// List takes label and field selectors, and returns the list of CiliumBGPClusterConfigs that match those selectors. 
+func (c *FakeCiliumBGPClusterConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPClusterConfigList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ciliumbgpclusterconfigsResource, ciliumbgpclusterconfigsKind, opts), &v2alpha1.CiliumBGPClusterConfigList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.CiliumBGPClusterConfigList{ListMeta: obj.(*v2alpha1.CiliumBGPClusterConfigList).ListMeta} + for _, item := range obj.(*v2alpha1.CiliumBGPClusterConfigList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumBGPClusterConfigs. +func (c *FakeCiliumBGPClusterConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumbgpclusterconfigsResource, opts)) +} + +// Create takes the representation of a ciliumBGPClusterConfig and creates it. Returns the server's representation of the ciliumBGPClusterConfig, and an error, if there is any. +func (c *FakeCiliumBGPClusterConfigs) Create(ctx context.Context, ciliumBGPClusterConfig *v2alpha1.CiliumBGPClusterConfig, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPClusterConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumbgpclusterconfigsResource, ciliumBGPClusterConfig), &v2alpha1.CiliumBGPClusterConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPClusterConfig), err +} + +// Update takes the representation of a ciliumBGPClusterConfig and updates it. Returns the server's representation of the ciliumBGPClusterConfig, and an error, if there is any. +func (c *FakeCiliumBGPClusterConfigs) Update(ctx context.Context, ciliumBGPClusterConfig *v2alpha1.CiliumBGPClusterConfig, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPClusterConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumbgpclusterconfigsResource, ciliumBGPClusterConfig), &v2alpha1.CiliumBGPClusterConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPClusterConfig), err +} + +// Delete takes name of the ciliumBGPClusterConfig and deletes it. Returns an error if one occurs. +func (c *FakeCiliumBGPClusterConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumbgpclusterconfigsResource, name, opts), &v2alpha1.CiliumBGPClusterConfig{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumBGPClusterConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumbgpclusterconfigsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2alpha1.CiliumBGPClusterConfigList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumBGPClusterConfig. +func (c *FakeCiliumBGPClusterConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPClusterConfig, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumbgpclusterconfigsResource, name, pt, data, subresources...), &v2alpha1.CiliumBGPClusterConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPClusterConfig), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpnodeconfig.go new file mode 100644 index 0000000000..9bd9953a2d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpnodeconfig.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumBGPNodeConfigs implements CiliumBGPNodeConfigInterface +type FakeCiliumBGPNodeConfigs struct { + Fake *FakeCiliumV2alpha1 +} + +var ciliumbgpnodeconfigsResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumbgpnodeconfigs") + +var ciliumbgpnodeconfigsKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumBGPNodeConfig") + +// Get takes name of the ciliumBGPNodeConfig, and returns the corresponding ciliumBGPNodeConfig object, and an error if there is any. +func (c *FakeCiliumBGPNodeConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPNodeConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumbgpnodeconfigsResource, name), &v2alpha1.CiliumBGPNodeConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPNodeConfig), err +} + +// List takes label and field selectors, and returns the list of CiliumBGPNodeConfigs that match those selectors. +func (c *FakeCiliumBGPNodeConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPNodeConfigList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ciliumbgpnodeconfigsResource, ciliumbgpnodeconfigsKind, opts), &v2alpha1.CiliumBGPNodeConfigList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.CiliumBGPNodeConfigList{ListMeta: obj.(*v2alpha1.CiliumBGPNodeConfigList).ListMeta} + for _, item := range obj.(*v2alpha1.CiliumBGPNodeConfigList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumBGPNodeConfigs. +func (c *FakeCiliumBGPNodeConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumbgpnodeconfigsResource, opts)) +} + +// Create takes the representation of a ciliumBGPNodeConfig and creates it. Returns the server's representation of the ciliumBGPNodeConfig, and an error, if there is any. 
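Reviewer note: the generated Watch methods delegate to InvokesWatch; assuming the default watch reactor that the generated NewSimpleClientset installs, the returned watcher is backed by the same object tracker, so later writes surface as events. A sketch under that assumption (the config name is illustrative):

package fake_test

import (
	"context"
	"testing"

	v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
	"github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

func TestFakeBGPClusterConfigWatch(t *testing.T) {
	cs := fake.NewSimpleClientset()

	// Start a watch first; the tracker-backed watcher then replays
	// subsequent writes as events on ResultChan.
	w, err := cs.CiliumV2alpha1().CiliumBGPClusterConfigs().Watch(
		context.TODO(), metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	defer w.Stop()

	cfg := &v2alpha1.CiliumBGPClusterConfig{ObjectMeta: metav1.ObjectMeta{Name: "main"}}
	if _, err := cs.CiliumV2alpha1().CiliumBGPClusterConfigs().Create(
		context.TODO(), cfg, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	// The Create above should arrive as an Added event.
	ev := <-w.ResultChan()
	if ev.Type != watch.Added {
		t.Fatalf("expected Added event, got %v", ev.Type)
	}
}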
+func (c *FakeCiliumBGPNodeConfigs) Create(ctx context.Context, ciliumBGPNodeConfig *v2alpha1.CiliumBGPNodeConfig, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPNodeConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumbgpnodeconfigsResource, ciliumBGPNodeConfig), &v2alpha1.CiliumBGPNodeConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPNodeConfig), err +} + +// Update takes the representation of a ciliumBGPNodeConfig and updates it. Returns the server's representation of the ciliumBGPNodeConfig, and an error, if there is any. +func (c *FakeCiliumBGPNodeConfigs) Update(ctx context.Context, ciliumBGPNodeConfig *v2alpha1.CiliumBGPNodeConfig, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPNodeConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumbgpnodeconfigsResource, ciliumBGPNodeConfig), &v2alpha1.CiliumBGPNodeConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPNodeConfig), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCiliumBGPNodeConfigs) UpdateStatus(ctx context.Context, ciliumBGPNodeConfig *v2alpha1.CiliumBGPNodeConfig, opts v1.UpdateOptions) (*v2alpha1.CiliumBGPNodeConfig, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(ciliumbgpnodeconfigsResource, "status", ciliumBGPNodeConfig), &v2alpha1.CiliumBGPNodeConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPNodeConfig), err +} + +// Delete takes name of the ciliumBGPNodeConfig and deletes it. Returns an error if one occurs. +func (c *FakeCiliumBGPNodeConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumbgpnodeconfigsResource, name, opts), &v2alpha1.CiliumBGPNodeConfig{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumBGPNodeConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumbgpnodeconfigsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2alpha1.CiliumBGPNodeConfigList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumBGPNodeConfig. +func (c *FakeCiliumBGPNodeConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPNodeConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ciliumbgpnodeconfigsResource, name, pt, data, subresources...), &v2alpha1.CiliumBGPNodeConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPNodeConfig), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpnodeconfigoverride.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpnodeconfigoverride.go new file mode 100644 index 0000000000..c13613f23f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpnodeconfigoverride.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. 
DO NOT EDIT. + +package fake + +import ( + "context" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumBGPNodeConfigOverrides implements CiliumBGPNodeConfigOverrideInterface +type FakeCiliumBGPNodeConfigOverrides struct { + Fake *FakeCiliumV2alpha1 +} + +var ciliumbgpnodeconfigoverridesResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumbgpnodeconfigoverrides") + +var ciliumbgpnodeconfigoverridesKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumBGPNodeConfigOverride") + +// Get takes name of the ciliumBGPNodeConfigOverride, and returns the corresponding ciliumBGPNodeConfigOverride object, and an error if there is any. +func (c *FakeCiliumBGPNodeConfigOverrides) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPNodeConfigOverride, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumbgpnodeconfigoverridesResource, name), &v2alpha1.CiliumBGPNodeConfigOverride{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPNodeConfigOverride), err +} + +// List takes label and field selectors, and returns the list of CiliumBGPNodeConfigOverrides that match those selectors. +func (c *FakeCiliumBGPNodeConfigOverrides) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPNodeConfigOverrideList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ciliumbgpnodeconfigoverridesResource, ciliumbgpnodeconfigoverridesKind, opts), &v2alpha1.CiliumBGPNodeConfigOverrideList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.CiliumBGPNodeConfigOverrideList{ListMeta: obj.(*v2alpha1.CiliumBGPNodeConfigOverrideList).ListMeta} + for _, item := range obj.(*v2alpha1.CiliumBGPNodeConfigOverrideList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumBGPNodeConfigOverrides. +func (c *FakeCiliumBGPNodeConfigOverrides) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumbgpnodeconfigoverridesResource, opts)) +} + +// Create takes the representation of a ciliumBGPNodeConfigOverride and creates it. Returns the server's representation of the ciliumBGPNodeConfigOverride, and an error, if there is any. +func (c *FakeCiliumBGPNodeConfigOverrides) Create(ctx context.Context, ciliumBGPNodeConfigOverride *v2alpha1.CiliumBGPNodeConfigOverride, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPNodeConfigOverride, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumbgpnodeconfigoverridesResource, ciliumBGPNodeConfigOverride), &v2alpha1.CiliumBGPNodeConfigOverride{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPNodeConfigOverride), err +} + +// Update takes the representation of a ciliumBGPNodeConfigOverride and updates it. Returns the server's representation of the ciliumBGPNodeConfigOverride, and an error, if there is any. 
+func (c *FakeCiliumBGPNodeConfigOverrides) Update(ctx context.Context, ciliumBGPNodeConfigOverride *v2alpha1.CiliumBGPNodeConfigOverride, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPNodeConfigOverride, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumbgpnodeconfigoverridesResource, ciliumBGPNodeConfigOverride), &v2alpha1.CiliumBGPNodeConfigOverride{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPNodeConfigOverride), err +} + +// Delete takes name of the ciliumBGPNodeConfigOverride and deletes it. Returns an error if one occurs. +func (c *FakeCiliumBGPNodeConfigOverrides) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumbgpnodeconfigoverridesResource, name, opts), &v2alpha1.CiliumBGPNodeConfigOverride{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumBGPNodeConfigOverrides) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumbgpnodeconfigoverridesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2alpha1.CiliumBGPNodeConfigOverrideList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumBGPNodeConfigOverride. +func (c *FakeCiliumBGPNodeConfigOverrides) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPNodeConfigOverride, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ciliumbgpnodeconfigoverridesResource, name, pt, data, subresources...), &v2alpha1.CiliumBGPNodeConfigOverride{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPNodeConfigOverride), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeerconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeerconfig.go new file mode 100644 index 0000000000..393f2fb07d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeerconfig.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumBGPPeerConfigs implements CiliumBGPPeerConfigInterface +type FakeCiliumBGPPeerConfigs struct { + Fake *FakeCiliumV2alpha1 +} + +var ciliumbgppeerconfigsResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumbgppeerconfigs") + +var ciliumbgppeerconfigsKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumBGPPeerConfig") + +// Get takes name of the ciliumBGPPeerConfig, and returns the corresponding ciliumBGPPeerConfig object, and an error if there is any. +func (c *FakeCiliumBGPPeerConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPPeerConfig, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetAction(ciliumbgppeerconfigsResource, name), &v2alpha1.CiliumBGPPeerConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPPeerConfig), err +} + +// List takes label and field selectors, and returns the list of CiliumBGPPeerConfigs that match those selectors. +func (c *FakeCiliumBGPPeerConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPPeerConfigList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ciliumbgppeerconfigsResource, ciliumbgppeerconfigsKind, opts), &v2alpha1.CiliumBGPPeerConfigList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.CiliumBGPPeerConfigList{ListMeta: obj.(*v2alpha1.CiliumBGPPeerConfigList).ListMeta} + for _, item := range obj.(*v2alpha1.CiliumBGPPeerConfigList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumBGPPeerConfigs. +func (c *FakeCiliumBGPPeerConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumbgppeerconfigsResource, opts)) +} + +// Create takes the representation of a ciliumBGPPeerConfig and creates it. Returns the server's representation of the ciliumBGPPeerConfig, and an error, if there is any. +func (c *FakeCiliumBGPPeerConfigs) Create(ctx context.Context, ciliumBGPPeerConfig *v2alpha1.CiliumBGPPeerConfig, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPPeerConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumbgppeerconfigsResource, ciliumBGPPeerConfig), &v2alpha1.CiliumBGPPeerConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPPeerConfig), err +} + +// Update takes the representation of a ciliumBGPPeerConfig and updates it. Returns the server's representation of the ciliumBGPPeerConfig, and an error, if there is any. +func (c *FakeCiliumBGPPeerConfigs) Update(ctx context.Context, ciliumBGPPeerConfig *v2alpha1.CiliumBGPPeerConfig, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPPeerConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumbgppeerconfigsResource, ciliumBGPPeerConfig), &v2alpha1.CiliumBGPPeerConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPPeerConfig), err +} + +// Delete takes name of the ciliumBGPPeerConfig and deletes it. Returns an error if one occurs. +func (c *FakeCiliumBGPPeerConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumbgppeerconfigsResource, name, opts), &v2alpha1.CiliumBGPPeerConfig{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumBGPPeerConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumbgppeerconfigsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2alpha1.CiliumBGPPeerConfigList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumBGPPeerConfig. 
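
Editor's note: because every call funnels through Invokes, a test can intercept specific verbs and resources with a reactor before the default tracker logic runs. A hedged sketch of injecting an error into Create calls for the peer-config resource (the lowercase plural resource string mirrors the WithResource registration above):

import (
	"errors"

	"k8s.io/apimachinery/pkg/runtime"
	k8stesting "k8s.io/client-go/testing"

	cilium_fake "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake"
)

func newFailingClientset() *cilium_fake.Clientset {
	cs := cilium_fake.NewSimpleClientset()
	// PrependReactor runs before the default tracker-backed reaction,
	// so every Create of ciliumbgppeerconfigs fails with this error.
	cs.Fake.PrependReactor("create", "ciliumbgppeerconfigs",
		func(action k8stesting.Action) (bool, runtime.Object, error) {
			return true, nil, errors.New("injected apiserver failure")
		})
	return cs
}
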
+func (c *FakeCiliumBGPPeerConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPPeerConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ciliumbgppeerconfigsResource, name, pt, data, subresources...), &v2alpha1.CiliumBGPPeerConfig{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPPeerConfig), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeeringpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeeringpolicy.go new file mode 100644 index 0000000000..9da1136d41 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeeringpolicy.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumBGPPeeringPolicies implements CiliumBGPPeeringPolicyInterface +type FakeCiliumBGPPeeringPolicies struct { + Fake *FakeCiliumV2alpha1 +} + +var ciliumbgppeeringpoliciesResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumbgppeeringpolicies") + +var ciliumbgppeeringpoliciesKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumBGPPeeringPolicy") + +// Get takes name of the ciliumBGPPeeringPolicy, and returns the corresponding ciliumBGPPeeringPolicy object, and an error if there is any. +func (c *FakeCiliumBGPPeeringPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumbgppeeringpoliciesResource, name), &v2alpha1.CiliumBGPPeeringPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPPeeringPolicy), err +} + +// List takes label and field selectors, and returns the list of CiliumBGPPeeringPolicies that match those selectors. +func (c *FakeCiliumBGPPeeringPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPPeeringPolicyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ciliumbgppeeringpoliciesResource, ciliumbgppeeringpoliciesKind, opts), &v2alpha1.CiliumBGPPeeringPolicyList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.CiliumBGPPeeringPolicyList{ListMeta: obj.(*v2alpha1.CiliumBGPPeeringPolicyList).ListMeta} + for _, item := range obj.(*v2alpha1.CiliumBGPPeeringPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumBGPPeeringPolicies. +func (c *FakeCiliumBGPPeeringPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewRootWatchAction(ciliumbgppeeringpoliciesResource, opts)) +} + +// Create takes the representation of a ciliumBGPPeeringPolicy and creates it. Returns the server's representation of the ciliumBGPPeeringPolicy, and an error, if there is any. +func (c *FakeCiliumBGPPeeringPolicies) Create(ctx context.Context, ciliumBGPPeeringPolicy *v2alpha1.CiliumBGPPeeringPolicy, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumbgppeeringpoliciesResource, ciliumBGPPeeringPolicy), &v2alpha1.CiliumBGPPeeringPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPPeeringPolicy), err +} + +// Update takes the representation of a ciliumBGPPeeringPolicy and updates it. Returns the server's representation of the ciliumBGPPeeringPolicy, and an error, if there is any. +func (c *FakeCiliumBGPPeeringPolicies) Update(ctx context.Context, ciliumBGPPeeringPolicy *v2alpha1.CiliumBGPPeeringPolicy, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumbgppeeringpoliciesResource, ciliumBGPPeeringPolicy), &v2alpha1.CiliumBGPPeeringPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPPeeringPolicy), err +} + +// Delete takes name of the ciliumBGPPeeringPolicy and deletes it. Returns an error if one occurs. +func (c *FakeCiliumBGPPeeringPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumbgppeeringpoliciesResource, name, opts), &v2alpha1.CiliumBGPPeeringPolicy{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumBGPPeeringPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumbgppeeringpoliciesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2alpha1.CiliumBGPPeeringPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumBGPPeeringPolicy. +func (c *FakeCiliumBGPPeeringPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ciliumbgppeeringpoliciesResource, name, pt, data, subresources...), &v2alpha1.CiliumBGPPeeringPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumBGPPeeringPolicy), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumcidrgroup.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumcidrgroup.go new file mode 100644 index 0000000000..ec271bbc42 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumcidrgroup.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. 
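
Editor's note: one behavior of these generated List methods that is easy to miss is that only the label selector from ListOptions is applied (via ExtractFromListOptions and an in-memory Matches loop); field selectors are silently ignored by the fake. A short sketch of relying on that in a test (helper name is ours):

func listProdPolicies(ctx context.Context, cs *cilium_fake.Clientset) ([]v2alpha1.CiliumBGPPeeringPolicy, error) {
	// The fake filters the tracker's objects by label in memory; a
	// FieldSelector in these options would have no effect.
	l, err := cs.CiliumV2alpha1().CiliumBGPPeeringPolicies().List(ctx,
		metav1.ListOptions{LabelSelector: "env=prod"})
	if err != nil {
		return nil, err
	}
	return l.Items, nil
}
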
+ +package fake + +import ( + "context" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumCIDRGroups implements CiliumCIDRGroupInterface +type FakeCiliumCIDRGroups struct { + Fake *FakeCiliumV2alpha1 +} + +var ciliumcidrgroupsResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumcidrgroups") + +var ciliumcidrgroupsKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumCIDRGroup") + +// Get takes name of the ciliumCIDRGroup, and returns the corresponding ciliumCIDRGroup object, and an error if there is any. +func (c *FakeCiliumCIDRGroups) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumCIDRGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumcidrgroupsResource, name), &v2alpha1.CiliumCIDRGroup{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumCIDRGroup), err +} + +// List takes label and field selectors, and returns the list of CiliumCIDRGroups that match those selectors. +func (c *FakeCiliumCIDRGroups) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumCIDRGroupList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ciliumcidrgroupsResource, ciliumcidrgroupsKind, opts), &v2alpha1.CiliumCIDRGroupList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.CiliumCIDRGroupList{ListMeta: obj.(*v2alpha1.CiliumCIDRGroupList).ListMeta} + for _, item := range obj.(*v2alpha1.CiliumCIDRGroupList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumCIDRGroups. +func (c *FakeCiliumCIDRGroups) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumcidrgroupsResource, opts)) +} + +// Create takes the representation of a ciliumCIDRGroup and creates it. Returns the server's representation of the ciliumCIDRGroup, and an error, if there is any. +func (c *FakeCiliumCIDRGroups) Create(ctx context.Context, ciliumCIDRGroup *v2alpha1.CiliumCIDRGroup, opts v1.CreateOptions) (result *v2alpha1.CiliumCIDRGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumcidrgroupsResource, ciliumCIDRGroup), &v2alpha1.CiliumCIDRGroup{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumCIDRGroup), err +} + +// Update takes the representation of a ciliumCIDRGroup and updates it. Returns the server's representation of the ciliumCIDRGroup, and an error, if there is any. +func (c *FakeCiliumCIDRGroups) Update(ctx context.Context, ciliumCIDRGroup *v2alpha1.CiliumCIDRGroup, opts v1.UpdateOptions) (result *v2alpha1.CiliumCIDRGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumcidrgroupsResource, ciliumCIDRGroup), &v2alpha1.CiliumCIDRGroup{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumCIDRGroup), err +} + +// Delete takes name of the ciliumCIDRGroup and deletes it. Returns an error if one occurs. 
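
Editor's note: the Watch methods delegate to InvokesWatch, which in standard client-gen fakes is wired to the tracker's watch support, so objects created through the same fake clientset after the watch starts arrive as events. A rough sketch, with channel handling kept deliberately simple:

func watchCIDRGroups(ctx context.Context, cs *cilium_fake.Clientset) error {
	w, err := cs.CiliumV2alpha1().CiliumCIDRGroups().Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()

	// Each Create/Update/Delete made through the same fake from now on
	// is delivered here as an Added/Modified/Deleted event.
	for {
		select {
		case ev := <-w.ResultChan():
			fmt.Printf("event: %s %T\n", ev.Type, ev.Object)
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
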
+func (c *FakeCiliumCIDRGroups) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumcidrgroupsResource, name, opts), &v2alpha1.CiliumCIDRGroup{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumCIDRGroups) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumcidrgroupsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2alpha1.CiliumCIDRGroupList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumCIDRGroup. +func (c *FakeCiliumCIDRGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumCIDRGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ciliumcidrgroupsResource, name, pt, data, subresources...), &v2alpha1.CiliumCIDRGroup{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumCIDRGroup), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumendpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumendpointslice.go new file mode 100644 index 0000000000..447afbdde6 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumendpointslice.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumEndpointSlices implements CiliumEndpointSliceInterface +type FakeCiliumEndpointSlices struct { + Fake *FakeCiliumV2alpha1 +} + +var ciliumendpointslicesResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumendpointslices") + +var ciliumendpointslicesKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumEndpointSlice") + +// Get takes name of the ciliumEndpointSlice, and returns the corresponding ciliumEndpointSlice object, and an error if there is any. +func (c *FakeCiliumEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumEndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumendpointslicesResource, name), &v2alpha1.CiliumEndpointSlice{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumEndpointSlice), err +} + +// List takes label and field selectors, and returns the list of CiliumEndpointSlices that match those selectors. +func (c *FakeCiliumEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumEndpointSliceList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(ciliumendpointslicesResource, ciliumendpointslicesKind, opts), &v2alpha1.CiliumEndpointSliceList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.CiliumEndpointSliceList{ListMeta: obj.(*v2alpha1.CiliumEndpointSliceList).ListMeta} + for _, item := range obj.(*v2alpha1.CiliumEndpointSliceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumEndpointSlices. +func (c *FakeCiliumEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumendpointslicesResource, opts)) +} + +// Create takes the representation of a ciliumEndpointSlice and creates it. Returns the server's representation of the ciliumEndpointSlice, and an error, if there is any. +func (c *FakeCiliumEndpointSlices) Create(ctx context.Context, ciliumEndpointSlice *v2alpha1.CiliumEndpointSlice, opts v1.CreateOptions) (result *v2alpha1.CiliumEndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumendpointslicesResource, ciliumEndpointSlice), &v2alpha1.CiliumEndpointSlice{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumEndpointSlice), err +} + +// Update takes the representation of a ciliumEndpointSlice and updates it. Returns the server's representation of the ciliumEndpointSlice, and an error, if there is any. +func (c *FakeCiliumEndpointSlices) Update(ctx context.Context, ciliumEndpointSlice *v2alpha1.CiliumEndpointSlice, opts v1.UpdateOptions) (result *v2alpha1.CiliumEndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumendpointslicesResource, ciliumEndpointSlice), &v2alpha1.CiliumEndpointSlice{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumEndpointSlice), err +} + +// Delete takes name of the ciliumEndpointSlice and deletes it. Returns an error if one occurs. +func (c *FakeCiliumEndpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumendpointslicesResource, name, opts), &v2alpha1.CiliumEndpointSlice{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumEndpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumendpointslicesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2alpha1.CiliumEndpointSliceList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumEndpointSlice. +func (c *FakeCiliumEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumEndpointSlice, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumendpointslicesResource, name, pt, data, subresources...), &v2alpha1.CiliumEndpointSlice{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumEndpointSlice), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliuml2announcementpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliuml2announcementpolicy.go new file mode 100644 index 0000000000..2ad271efce --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliuml2announcementpolicy.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumL2AnnouncementPolicies implements CiliumL2AnnouncementPolicyInterface +type FakeCiliumL2AnnouncementPolicies struct { + Fake *FakeCiliumV2alpha1 +} + +var ciliuml2announcementpoliciesResource = v2alpha1.SchemeGroupVersion.WithResource("ciliuml2announcementpolicies") + +var ciliuml2announcementpoliciesKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumL2AnnouncementPolicy") + +// Get takes name of the ciliumL2AnnouncementPolicy, and returns the corresponding ciliumL2AnnouncementPolicy object, and an error if there is any. +func (c *FakeCiliumL2AnnouncementPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliuml2announcementpoliciesResource, name), &v2alpha1.CiliumL2AnnouncementPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumL2AnnouncementPolicy), err +} + +// List takes label and field selectors, and returns the list of CiliumL2AnnouncementPolicies that match those selectors. +func (c *FakeCiliumL2AnnouncementPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumL2AnnouncementPolicyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ciliuml2announcementpoliciesResource, ciliuml2announcementpoliciesKind, opts), &v2alpha1.CiliumL2AnnouncementPolicyList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.CiliumL2AnnouncementPolicyList{ListMeta: obj.(*v2alpha1.CiliumL2AnnouncementPolicyList).ListMeta} + for _, item := range obj.(*v2alpha1.CiliumL2AnnouncementPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumL2AnnouncementPolicies. +func (c *FakeCiliumL2AnnouncementPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliuml2announcementpoliciesResource, opts)) +} + +// Create takes the representation of a ciliumL2AnnouncementPolicy and creates it. 
Returns the server's representation of the ciliumL2AnnouncementPolicy, and an error, if there is any. +func (c *FakeCiliumL2AnnouncementPolicies) Create(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.CreateOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliuml2announcementpoliciesResource, ciliumL2AnnouncementPolicy), &v2alpha1.CiliumL2AnnouncementPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumL2AnnouncementPolicy), err +} + +// Update takes the representation of a ciliumL2AnnouncementPolicy and updates it. Returns the server's representation of the ciliumL2AnnouncementPolicy, and an error, if there is any. +func (c *FakeCiliumL2AnnouncementPolicies) Update(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliuml2announcementpoliciesResource, ciliumL2AnnouncementPolicy), &v2alpha1.CiliumL2AnnouncementPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumL2AnnouncementPolicy), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCiliumL2AnnouncementPolicies) UpdateStatus(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (*v2alpha1.CiliumL2AnnouncementPolicy, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(ciliuml2announcementpoliciesResource, "status", ciliumL2AnnouncementPolicy), &v2alpha1.CiliumL2AnnouncementPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumL2AnnouncementPolicy), err +} + +// Delete takes name of the ciliumL2AnnouncementPolicy and deletes it. Returns an error if one occurs. +func (c *FakeCiliumL2AnnouncementPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliuml2announcementpoliciesResource, name, opts), &v2alpha1.CiliumL2AnnouncementPolicy{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumL2AnnouncementPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliuml2announcementpoliciesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2alpha1.CiliumL2AnnouncementPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumL2AnnouncementPolicy. +func (c *FakeCiliumL2AnnouncementPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(ciliuml2announcementpoliciesResource, name, pt, data, subresources...), &v2alpha1.CiliumL2AnnouncementPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumL2AnnouncementPolicy), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumloadbalancerippool.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumloadbalancerippool.go new file mode 100644 index 0000000000..b150f24fa6 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumloadbalancerippool.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumLoadBalancerIPPools implements CiliumLoadBalancerIPPoolInterface +type FakeCiliumLoadBalancerIPPools struct { + Fake *FakeCiliumV2alpha1 +} + +var ciliumloadbalancerippoolsResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumloadbalancerippools") + +var ciliumloadbalancerippoolsKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumLoadBalancerIPPool") + +// Get takes name of the ciliumLoadBalancerIPPool, and returns the corresponding ciliumLoadBalancerIPPool object, and an error if there is any. +func (c *FakeCiliumLoadBalancerIPPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumloadbalancerippoolsResource, name), &v2alpha1.CiliumLoadBalancerIPPool{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumLoadBalancerIPPool), err +} + +// List takes label and field selectors, and returns the list of CiliumLoadBalancerIPPools that match those selectors. +func (c *FakeCiliumLoadBalancerIPPools) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumLoadBalancerIPPoolList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ciliumloadbalancerippoolsResource, ciliumloadbalancerippoolsKind, opts), &v2alpha1.CiliumLoadBalancerIPPoolList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.CiliumLoadBalancerIPPoolList{ListMeta: obj.(*v2alpha1.CiliumLoadBalancerIPPoolList).ListMeta} + for _, item := range obj.(*v2alpha1.CiliumLoadBalancerIPPoolList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumLoadBalancerIPPools. +func (c *FakeCiliumLoadBalancerIPPools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumloadbalancerippoolsResource, opts)) +} + +// Create takes the representation of a ciliumLoadBalancerIPPool and creates it. Returns the server's representation of the ciliumLoadBalancerIPPool, and an error, if there is any. 
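
Editor's note: CiliumL2AnnouncementPolicy above and CiliumLoadBalancerIPPool below are among the types generated with an UpdateStatus method, because they carry a Status subresource; the fake records those calls as update actions on the "status" subresource, leaving the spec alone. A hedged sketch of driving it (the status fields are mutated opaquely here, since their shape is defined elsewhere in the API package):

func touchPoolStatus(ctx context.Context, cs *cilium_fake.Clientset, name string) error {
	pool, err := cs.CiliumV2alpha1().CiliumLoadBalancerIPPools().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// ...mutate pool.Status as the test requires...
	// UpdateStatus goes through NewRootUpdateSubresourceAction(...,
	// "status", ...), which is what assertions on cs.Actions() will see.
	_, err = cs.CiliumV2alpha1().CiliumLoadBalancerIPPools().UpdateStatus(ctx, pool, metav1.UpdateOptions{})
	return err
}
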
+func (c *FakeCiliumLoadBalancerIPPools) Create(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.CreateOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumloadbalancerippoolsResource, ciliumLoadBalancerIPPool), &v2alpha1.CiliumLoadBalancerIPPool{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumLoadBalancerIPPool), err +} + +// Update takes the representation of a ciliumLoadBalancerIPPool and updates it. Returns the server's representation of the ciliumLoadBalancerIPPool, and an error, if there is any. +func (c *FakeCiliumLoadBalancerIPPools) Update(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumloadbalancerippoolsResource, ciliumLoadBalancerIPPool), &v2alpha1.CiliumLoadBalancerIPPool{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumLoadBalancerIPPool), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCiliumLoadBalancerIPPools) UpdateStatus(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (*v2alpha1.CiliumLoadBalancerIPPool, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(ciliumloadbalancerippoolsResource, "status", ciliumLoadBalancerIPPool), &v2alpha1.CiliumLoadBalancerIPPool{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumLoadBalancerIPPool), err +} + +// Delete takes name of the ciliumLoadBalancerIPPool and deletes it. Returns an error if one occurs. +func (c *FakeCiliumLoadBalancerIPPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumloadbalancerippoolsResource, name, opts), &v2alpha1.CiliumLoadBalancerIPPool{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumLoadBalancerIPPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumloadbalancerippoolsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2alpha1.CiliumLoadBalancerIPPoolList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumLoadBalancerIPPool. +func (c *FakeCiliumLoadBalancerIPPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumloadbalancerippoolsResource, name, pt, data, subresources...), &v2alpha1.CiliumLoadBalancerIPPool{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumLoadBalancerIPPool), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumnodeconfig.go new file mode 100644 index 0000000000..a826f0bc09 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumnodeconfig.go @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumNodeConfigs implements CiliumNodeConfigInterface +type FakeCiliumNodeConfigs struct { + Fake *FakeCiliumV2alpha1 + ns string +} + +var ciliumnodeconfigsResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumnodeconfigs") + +var ciliumnodeconfigsKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumNodeConfig") + +// Get takes name of the ciliumNodeConfig, and returns the corresponding ciliumNodeConfig object, and an error if there is any. +func (c *FakeCiliumNodeConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumNodeConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(ciliumnodeconfigsResource, c.ns, name), &v2alpha1.CiliumNodeConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumNodeConfig), err +} + +// List takes label and field selectors, and returns the list of CiliumNodeConfigs that match those selectors. +func (c *FakeCiliumNodeConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumNodeConfigList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(ciliumnodeconfigsResource, ciliumnodeconfigsKind, c.ns, opts), &v2alpha1.CiliumNodeConfigList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.CiliumNodeConfigList{ListMeta: obj.(*v2alpha1.CiliumNodeConfigList).ListMeta} + for _, item := range obj.(*v2alpha1.CiliumNodeConfigList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumNodeConfigs. +func (c *FakeCiliumNodeConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(ciliumnodeconfigsResource, c.ns, opts)) + +} + +// Create takes the representation of a ciliumNodeConfig and creates it. Returns the server's representation of the ciliumNodeConfig, and an error, if there is any. +func (c *FakeCiliumNodeConfigs) Create(ctx context.Context, ciliumNodeConfig *v2alpha1.CiliumNodeConfig, opts v1.CreateOptions) (result *v2alpha1.CiliumNodeConfig, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(ciliumnodeconfigsResource, c.ns, ciliumNodeConfig), &v2alpha1.CiliumNodeConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumNodeConfig), err +} + +// Update takes the representation of a ciliumNodeConfig and updates it. Returns the server's representation of the ciliumNodeConfig, and an error, if there is any. +func (c *FakeCiliumNodeConfigs) Update(ctx context.Context, ciliumNodeConfig *v2alpha1.CiliumNodeConfig, opts v1.UpdateOptions) (result *v2alpha1.CiliumNodeConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(ciliumnodeconfigsResource, c.ns, ciliumNodeConfig), &v2alpha1.CiliumNodeConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumNodeConfig), err +} + +// Delete takes name of the ciliumNodeConfig and deletes it. Returns an error if one occurs. +func (c *FakeCiliumNodeConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(ciliumnodeconfigsResource, c.ns, name, opts), &v2alpha1.CiliumNodeConfig{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumNodeConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(ciliumnodeconfigsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v2alpha1.CiliumNodeConfigList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumNodeConfig. +func (c *FakeCiliumNodeConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumNodeConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(ciliumnodeconfigsResource, c.ns, name, pt, data, subresources...), &v2alpha1.CiliumNodeConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumNodeConfig), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumpodippool.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumpodippool.go new file mode 100644 index 0000000000..6922a8582e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumpodippool.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCiliumPodIPPools implements CiliumPodIPPoolInterface +type FakeCiliumPodIPPools struct { + Fake *FakeCiliumV2alpha1 +} + +var ciliumpodippoolsResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumpodippools") + +var ciliumpodippoolsKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumPodIPPool") + +// Get takes name of the ciliumPodIPPool, and returns the corresponding ciliumPodIPPool object, and an error if there is any. 
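
Editor's note: FakeCiliumNodeConfigs above differs from the other fakes in this file set in that it is namespaced — it keeps an ns field and builds its actions with the namespaced constructors (NewGetAction, NewCreateAction, ...), while cluster-scoped types such as CiliumPodIPPool use the NewRoot* constructors. In practice that surfaces only in the accessor signature; a small sketch (names invented):

func getBoth(ctx context.Context, cs *cilium_fake.Clientset) error {
	// Namespaced: the accessor takes a namespace, which the fake
	// threads into every recorded action.
	if _, err := cs.CiliumV2alpha1().CiliumNodeConfigs("kube-system").Get(ctx, "cfg", metav1.GetOptions{}); err != nil {
		return err
	}
	// Cluster-scoped: no namespace argument.
	_, err := cs.CiliumV2alpha1().CiliumPodIPPools().Get(ctx, "default-pool", metav1.GetOptions{})
	return err
}
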
+func (c *FakeCiliumPodIPPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumPodIPPool, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ciliumpodippoolsResource, name), &v2alpha1.CiliumPodIPPool{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumPodIPPool), err +} + +// List takes label and field selectors, and returns the list of CiliumPodIPPools that match those selectors. +func (c *FakeCiliumPodIPPools) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumPodIPPoolList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ciliumpodippoolsResource, ciliumpodippoolsKind, opts), &v2alpha1.CiliumPodIPPoolList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2alpha1.CiliumPodIPPoolList{ListMeta: obj.(*v2alpha1.CiliumPodIPPoolList).ListMeta} + for _, item := range obj.(*v2alpha1.CiliumPodIPPoolList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ciliumPodIPPools. +func (c *FakeCiliumPodIPPools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ciliumpodippoolsResource, opts)) +} + +// Create takes the representation of a ciliumPodIPPool and creates it. Returns the server's representation of the ciliumPodIPPool, and an error, if there is any. +func (c *FakeCiliumPodIPPools) Create(ctx context.Context, ciliumPodIPPool *v2alpha1.CiliumPodIPPool, opts v1.CreateOptions) (result *v2alpha1.CiliumPodIPPool, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ciliumpodippoolsResource, ciliumPodIPPool), &v2alpha1.CiliumPodIPPool{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumPodIPPool), err +} + +// Update takes the representation of a ciliumPodIPPool and updates it. Returns the server's representation of the ciliumPodIPPool, and an error, if there is any. +func (c *FakeCiliumPodIPPools) Update(ctx context.Context, ciliumPodIPPool *v2alpha1.CiliumPodIPPool, opts v1.UpdateOptions) (result *v2alpha1.CiliumPodIPPool, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ciliumpodippoolsResource, ciliumPodIPPool), &v2alpha1.CiliumPodIPPool{}) + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CiliumPodIPPool), err +} + +// Delete takes name of the ciliumPodIPPool and deletes it. Returns an error if one occurs. +func (c *FakeCiliumPodIPPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ciliumpodippoolsResource, name, opts), &v2alpha1.CiliumPodIPPool{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCiliumPodIPPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ciliumpodippoolsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v2alpha1.CiliumPodIPPoolList{}) + return err +} + +// Patch applies the patch and returns the patched ciliumPodIPPool. 
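
Editor's note: the generated Patch accepts any apimachinery patch type; against the fake, the tracker applies the patch to its stored copy, so merge patches work end to end in tests. A minimal sketch (pool name and label are invented):

func labelPool(ctx context.Context, cs *cilium_fake.Clientset) error {
	patch := []byte(`{"metadata":{"labels":{"tier":"gold"}}}`)
	_, err := cs.CiliumV2alpha1().CiliumPodIPPools().Patch(ctx, "default-pool",
		types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}
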
+func (c *FakeCiliumPodIPPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumPodIPPool, err error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewRootPatchSubresourceAction(ciliumpodippoolsResource, name, pt, data, subresources...), &v2alpha1.CiliumPodIPPool{})
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*v2alpha1.CiliumPodIPPool), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/config.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/config.go
new file mode 100644
index 0000000000..ac685bb4b4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/config.go
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+	"os"
+	"time"
+
+	"github.com/spf13/pflag"
+
+	"github.com/cilium/cilium/pkg/defaults"
+	"github.com/cilium/cilium/pkg/option"
+)
+
+type Config struct {
+	// EnableK8s is a flag that, when set to false, forcibly disables the clientset, so that Cilium
+	// can operate with CNI-compatible orchestrators other than Kubernetes. Defaults to true.
+	EnableK8s bool
+
+	// K8sAPIServer is the Kubernetes API server address (for https use --k8s-kubeconfig-path instead)
+	K8sAPIServer string
+
+	// K8sKubeConfigPath is the absolute path of the kubernetes kubeconfig file
+	K8sKubeConfigPath string
+
+	// K8sClientQPS is the queries per second limit for the K8s client. Defaults to k8s client defaults.
+	K8sClientQPS float32
+
+	// K8sClientBurst is the burst value allowed for the K8s client. Defaults to k8s client defaults.
+	K8sClientBurst int
+
+	// K8sHeartbeatTimeout configures the timeout for apiserver heartbeat
+	K8sHeartbeatTimeout time.Duration
+
+	// EnableK8sAPIDiscovery enables Kubernetes API discovery
+	EnableK8sAPIDiscovery bool
+}
+
+var defaultConfig = Config{
+	EnableK8s:             true,
+	K8sAPIServer:          "",
+	K8sKubeConfigPath:     "",
+	K8sClientQPS:          defaults.K8sClientQPSLimit,
+	K8sClientBurst:        defaults.K8sClientBurst,
+	K8sHeartbeatTimeout:   30 * time.Second,
+	EnableK8sAPIDiscovery: defaults.K8sEnableAPIDiscovery,
+}
+
+func (def Config) Flags(flags *pflag.FlagSet) {
+	flags.Bool(option.EnableK8s, def.EnableK8s, "Enable the k8s clientset")
+	flags.String(option.K8sAPIServer, def.K8sAPIServer, "Kubernetes API server URL")
+	flags.String(option.K8sKubeConfigPath, def.K8sKubeConfigPath, "Absolute path of the kubernetes kubeconfig file")
+	flags.Float32(option.K8sClientQPSLimit, def.K8sClientQPS, "Queries per second limit for the K8s client")
+	flags.Int(option.K8sClientBurst, def.K8sClientBurst, "Burst value allowed for the K8s client")
+	flags.Duration(option.K8sHeartbeatTimeout, def.K8sHeartbeatTimeout, "Configures the timeout for api-server heartbeat, set to 0 to disable")
+	flags.Bool(option.K8sEnableAPIDiscovery, def.EnableK8sAPIDiscovery, "Enable discovery of Kubernetes API groups and resources with the discovery API")
+}
+
+func (cfg Config) isEnabled() bool {
+	if !cfg.EnableK8s {
+		return false
+	}
+	return cfg.K8sAPIServer != "" ||
+		cfg.K8sKubeConfigPath != "" ||
+		(os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
+			os.Getenv("KUBERNETES_SERVICE_PORT") != "") ||
+		os.Getenv("K8S_NODE_NAME") != ""
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/getters.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/getters.go
new file mode 100644
index 0000000000..fc49940007
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/getters.go
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+	"context"
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+	slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+)
+
+// Getters is a set of methods for retrieving common objects.
+type Getters interface {
+	GetSecrets(ctx context.Context, namespace, name string) (map[string][]byte, error)
+	GetK8sNode(ctx context.Context, nodeName string) (*slim_corev1.Node, error)
+	GetCiliumNode(ctx context.Context, nodeName string) (*cilium_v2.CiliumNode, error)
+}
+
+// clientsetGetters implements the Getters interface in terms of the clientset.
+type clientsetGetters struct {
+	Clientset
+}
+
+// GetSecrets returns the data of the secret with the given namespace and name.
+func (cs *clientsetGetters) GetSecrets(ctx context.Context, ns, name string) (map[string][]byte, error) {
+	if !cs.IsEnabled() {
+		return nil, fmt.Errorf("GetSecrets: No k8s, cannot access k8s secrets")
+	}
+
+	result, err := cs.CoreV1().Secrets(ns).Get(ctx, name, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+	return result.Data, nil
+}
+
+// GetK8sNode returns the node with the given nodeName.
+func (cs *clientsetGetters) GetK8sNode(ctx context.Context, nodeName string) (*slim_corev1.Node, error) {
+	if !cs.IsEnabled() {
+		return nil, fmt.Errorf("GetK8sNode: No k8s, cannot access k8s nodes")
+	}
+
+	return cs.Slim().CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
+}
+
+// GetCiliumNode returns the CiliumNode with the given nodeName.
+func (cs *clientsetGetters) GetCiliumNode(ctx context.Context, nodeName string) (*cilium_v2.CiliumNode, error) {
+	if !cs.IsEnabled() {
+		return nil, fmt.Errorf("GetCiliumNode: No k8s, cannot access CiliumNode objects")
+	}
+
+	return cs.CiliumV2().CiliumNodes().Get(ctx, nodeName, metav1.GetOptions{})
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/constants/const.go b/vendor/github.com/cilium/cilium/pkg/k8s/constants/const.go
new file mode 100644
index 0000000000..1f6cb6dde1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/constants/const.go
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package constants
+
+const (
+	// EnvNodeNameSpec is the environment label used by Kubernetes to
+	// specify the node's name.
+	EnvNodeNameSpec = "K8S_NODE_NAME"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/doc.go
new file mode 100644
index 0000000000..48744cccbe
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package k8s contains all k8s related logic.
+// +groupName=pkg +package k8s diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/endpoints.go b/vendor/github.com/cilium/cilium/pkg/k8s/endpoints.go new file mode 100644 index 0000000000..50c811ff2f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/endpoints.go @@ -0,0 +1,497 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package k8s + +import ( + "fmt" + "net" + "net/netip" + "sort" + "strconv" + "strings" + + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" + slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + slim_discovery_v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1" + slim_discovery_v1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1" + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + "github.com/cilium/cilium/pkg/k8s/types" + "github.com/cilium/cilium/pkg/loadbalancer" + "github.com/cilium/cilium/pkg/metrics" + "github.com/cilium/cilium/pkg/option" + serviceStore "github.com/cilium/cilium/pkg/service/store" +) + +// Endpoints is an abstraction for the Kubernetes endpoints object. Endpoints +// consists of a set of backend IPs in combination with a set of ports and +// protocols. The name of the backend ports must match the names of the +// frontend ports of the corresponding service. +// +// The Endpoints object is parsed from either an EndpointSlice (preferred) or Endpoint +// Kubernetes objects depending on the Kubernetes version. +// +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +deepequal-gen=true +// +deepequal-gen:private-method=true +type Endpoints struct { + types.UnserializableObject + slim_metav1.ObjectMeta + + EndpointSliceID + + // Backends is a map containing all backend IPs and ports. The key to + // the map is the backend IP in string form. The value defines the list + // of ports for that backend IP, plus an additional optional node name. + // Backends map[cmtypes.AddrCluster]*Backend + Backends map[cmtypes.AddrCluster]*Backend +} + +// DeepEqual returns true if both endpoints are deep equal. +func (e *Endpoints) DeepEqual(o *Endpoints) bool { + switch { + case (e == nil) != (o == nil): + return false + case (e == nil) && (o == nil): + return true + } + return e.deepEqual(o) +} + +func (in *Endpoints) DeepCopyInto(out *Endpoints) { + *out = *in + if in.Backends != nil { + in, out := &in.Backends, &out.Backends + *out = make(map[cmtypes.AddrCluster]*Backend, len(*in)) + for key, val := range *in { + var outVal *Backend + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(Backend) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } +} + +func (in *Endpoints) DeepCopy() *Endpoints { + if in == nil { + return nil + } + out := new(Endpoints) + in.DeepCopyInto(out) + return out +} + +// Backend contains all ports, terminating state, and the node name of a given backend +// +// +k8s:deepcopy-gen=true +// +deepequal-gen=true +type Backend struct { + Ports serviceStore.PortConfiguration + NodeName string + Terminating bool + HintsForZones []string + Preferred bool +} + +// String returns the string representation of an endpoints resource, with +// backends and ports sorted. 
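
Editor's note: for orientation, a small sketch of what the two accessors below produce for a hand-built Endpoints value; it assumes cmtypes.MustParseAddrCluster exists alongside ParseAddrCluster in the clustermesh types package, and the address and port are invented:

eps := &Endpoints{Backends: map[cmtypes.AddrCluster]*Backend{
	cmtypes.MustParseAddrCluster("10.0.0.1"): {
		Ports: serviceStore.PortConfiguration{
			"http": loadbalancer.NewL4Addr(loadbalancer.TCP, 8080),
		},
	},
}}
fmt.Println(eps.String())   // "10.0.0.1:8080/TCP" — entries sorted, comma-joined
fmt.Println(eps.Prefixes()) // [10.0.0.1/32] — one full-length prefix per backend
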
+func (e *Endpoints) String() string { + if e == nil { + return "" + } + + backends := []string{} + for addrCluster, be := range e.Backends { + for _, port := range be.Ports { + backends = append(backends, fmt.Sprintf("%s/%s", net.JoinHostPort(addrCluster.Addr().String(), strconv.Itoa(int(port.Port))), port.Protocol)) + } + } + + sort.Strings(backends) + + return strings.Join(backends, ",") +} + +// newEndpoints returns a new Endpoints +func newEndpoints() *Endpoints { + return &Endpoints{ + Backends: map[cmtypes.AddrCluster]*Backend{}, + } +} + +// Prefixes returns the endpoint's backends as a slice of netip.Prefix. +func (e *Endpoints) Prefixes() []netip.Prefix { + prefixes := make([]netip.Prefix, 0, len(e.Backends)) + for addrCluster := range e.Backends { + addr := addrCluster.Addr() + prefixes = append(prefixes, netip.PrefixFrom(addr, addr.BitLen())) + } + return prefixes +} + +// ParseEndpointsID parses a Kubernetes endpoints and returns the EndpointSliceID +func ParseEndpointsID(ep *slim_corev1.Endpoints) EndpointSliceID { + return EndpointSliceID{ + ServiceID: ServiceID{ + Name: ep.ObjectMeta.Name, + Namespace: ep.ObjectMeta.Namespace, + }, + EndpointSliceName: ep.ObjectMeta.Name, + } +} + +// ParseEndpoints parses a Kubernetes Endpoints resource +func ParseEndpoints(ep *slim_corev1.Endpoints) *Endpoints { + endpoints := newEndpoints() + endpoints.ObjectMeta = ep.ObjectMeta + + for _, sub := range ep.Subsets { + for _, addr := range sub.Addresses { + addrCluster, err := cmtypes.ParseAddrCluster(addr.IP) + if err != nil { + continue + } + + backend, ok := endpoints.Backends[addrCluster] + if !ok { + backend = &Backend{Ports: serviceStore.PortConfiguration{}} + endpoints.Backends[addrCluster] = backend + } + + if addr.NodeName != nil { + backend.NodeName = *addr.NodeName + } + + for _, port := range sub.Ports { + lbPort := loadbalancer.NewL4Addr(loadbalancer.L4Type(port.Protocol), uint16(port.Port)) + backend.Ports[port.Name] = lbPort + } + } + } + + endpoints.EndpointSliceID = ParseEndpointsID(ep) + return endpoints +} + +type endpointSlice interface { + GetNamespace() string + GetName() string + GetLabels() map[string]string +} + +// ParseEndpointSliceID parses a Kubernetes endpoints slice and returns a +// EndpointSliceID +func ParseEndpointSliceID(es endpointSlice) EndpointSliceID { + return EndpointSliceID{ + ServiceID: ServiceID{ + Name: es.GetLabels()[slim_discovery_v1.LabelServiceName], + Namespace: es.GetNamespace(), + }, + EndpointSliceName: es.GetName(), + } +} + +// ParseEndpointSliceV1Beta1 parses a Kubernetes EndpointsSlice v1beta1 resource +// It reads ready and terminating state of endpoints in the EndpointSlice to +// return an EndpointSlice ID and a filtered list of Endpoints for service load-balancing. +func ParseEndpointSliceV1Beta1(ep *slim_discovery_v1beta1.EndpointSlice) *Endpoints { + endpoints := newEndpoints() + endpoints.ObjectMeta = ep.ObjectMeta + endpoints.EndpointSliceID = ParseEndpointSliceID(ep) + + // Validate AddressType before parsing. Currently, we only support IPv4 and IPv6. + if ep.AddressType != slim_discovery_v1beta1.AddressTypeIPv4 && + ep.AddressType != slim_discovery_v1beta1.AddressTypeIPv6 { + return endpoints + } + + for _, sub := range ep.Endpoints { + skipEndpoint := false + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. 
+		// More info: vendor/k8s.io/api/discovery/v1beta1/types.go
+		if sub.Conditions.Ready != nil && !*sub.Conditions.Ready {
+			skipEndpoint = true
+			if option.Config.EnableK8sTerminatingEndpoint {
+				// Terminating indicates that the endpoint is getting terminated. A
+				// nil value indicates an unknown state. Ready is never true when
+				// an endpoint is terminating. Propagate the terminating endpoint
+				// state so that we can gracefully remove those endpoints.
+				// More details: vendor/k8s.io/api/discovery/v1/types.go
+				if sub.Conditions.Terminating != nil && *sub.Conditions.Terminating {
+					skipEndpoint = false
+				}
+			}
+		}
+		if skipEndpoint {
+			continue
+		}
+		for _, addr := range sub.Addresses {
+			addrCluster, err := cmtypes.ParseAddrCluster(addr)
+			if err != nil {
+				continue
+			}
+
+			backend, ok := endpoints.Backends[addrCluster]
+			if !ok {
+				backend = &Backend{Ports: serviceStore.PortConfiguration{}}
+				endpoints.Backends[addrCluster] = backend
+				if nodeName, ok := sub.Topology["kubernetes.io/hostname"]; ok {
+					backend.NodeName = nodeName
+				}
+				if option.Config.EnableK8sTerminatingEndpoint {
+					if sub.Conditions.Terminating != nil && *sub.Conditions.Terminating {
+						backend.Terminating = true
+						metrics.TerminatingEndpointsEvents.Inc()
+					}
+				}
+			}
+
+			for _, port := range ep.Ports {
+				name, lbPort := parseEndpointPortV1Beta1(port)
+				if lbPort != nil {
+					backend.Ports[name] = lbPort
+				}
+			}
+		}
+	}
+	return endpoints
+}
+
+// parseEndpointPortV1Beta1 returns the port name and the port parsed as an
+// L4Addr from the given port.
+func parseEndpointPortV1Beta1(port slim_discovery_v1beta1.EndpointPort) (string, *loadbalancer.L4Addr) {
+	proto := loadbalancer.TCP
+	if port.Protocol != nil {
+		switch *port.Protocol {
+		case slim_corev1.ProtocolTCP:
+			proto = loadbalancer.TCP
+		case slim_corev1.ProtocolUDP:
+			proto = loadbalancer.UDP
+		case slim_corev1.ProtocolSCTP:
+			proto = loadbalancer.SCTP
+		default:
+			return "", nil
+		}
+	}
+	if port.Port == nil {
+		return "", nil
+	}
+	var name string
+	if port.Name != nil {
+		name = *port.Name
+	}
+	lbPort := loadbalancer.NewL4Addr(proto, uint16(*port.Port))
+	return name, lbPort
+}
+
+// ParseEndpointSliceV1 parses a Kubernetes EndpointSlice resource.
+// It reads the ready and terminating state of endpoints in the EndpointSlice to
+// return an EndpointSlice ID and a filtered list of Endpoints for service load-balancing.
+func ParseEndpointSliceV1(ep *slim_discovery_v1.EndpointSlice) *Endpoints {
+	endpoints := newEndpoints()
+	endpoints.ObjectMeta = ep.ObjectMeta
+	endpoints.EndpointSliceID = ParseEndpointSliceID(ep)
+
+	// Validate AddressType before parsing. Currently, we only support IPv4 and IPv6.
+	if ep.AddressType != slim_discovery_v1.AddressTypeIPv4 &&
+		ep.AddressType != slim_discovery_v1.AddressTypeIPv6 {
+		return endpoints
+	}
+
+	log.Debugf("Processing %d endpoints for EndpointSlice %s", len(ep.Endpoints), ep.Name)
+	for _, sub := range ep.Endpoints {
+		// ready indicates that this endpoint is prepared to receive traffic,
+		// according to whatever system is managing the endpoint. A nil value
+		// indicates an unknown state. In most cases consumers should interpret this
+		// unknown state as ready.
+		// More info: vendor/k8s.io/api/discovery/v1/types.go
+		isReady := sub.Conditions.Ready == nil || *sub.Conditions.Ready
+		// serving is identical to ready except that it is set regardless of the
+		// terminating state of endpoints. This condition should be set to true for
+		// a ready endpoint that is terminating. If nil, consumers should defer to
+		// the ready condition.
+		// More info: vendor/k8s.io/api/discovery/v1/types.go
+		isServing := (sub.Conditions.Serving == nil && isReady) || (sub.Conditions.Serving != nil && *sub.Conditions.Serving)
+		// Terminating indicates that the endpoint is getting terminated. A
+		// nil value indicates an unknown state. Ready is never true when
+		// an endpoint is terminating. Propagate the terminating endpoint
+		// state so that we can gracefully remove those endpoints.
+		// More info: vendor/k8s.io/api/discovery/v1/types.go
+		isTerminating := sub.Conditions.Terminating != nil && *sub.Conditions.Terminating
+
+		// If the endpoint is not Ready and EnableK8sTerminatingEndpoint is set,
+		// allow endpoints that are Serving and Terminating.
+		if !isReady {
+			if !option.Config.EnableK8sTerminatingEndpoint {
+				log.Debugf("discarding Endpoint on EndpointSlice %s: not Ready and EnableK8sTerminatingEndpoint %v", ep.Name, option.Config.EnableK8sTerminatingEndpoint)
+				continue
+			}
+			// Filter out endpoints that are not Serving, since those cannot receive traffic.
+			if !isServing {
+				log.Debugf("discarding Endpoint on EndpointSlice %s: not Serving and EnableK8sTerminatingEndpoint %v", ep.Name, option.Config.EnableK8sTerminatingEndpoint)
+				continue
+			}
+		}
+
+		for _, addr := range sub.Addresses {
+			addrCluster, err := cmtypes.ParseAddrCluster(addr)
+			if err != nil {
+				log.WithError(err).Infof("Unable to parse address %s for EndpointSlice %s", addr, ep.Name)
+				continue
+			}
+
+			backend, ok := endpoints.Backends[addrCluster]
+			if !ok {
+				backend = &Backend{Ports: serviceStore.PortConfiguration{}}
+				endpoints.Backends[addrCluster] = backend
+				if sub.NodeName != nil {
+					backend.NodeName = *sub.NodeName
+				} else {
+					if nodeName, ok := sub.DeprecatedTopology["kubernetes.io/hostname"]; ok {
+						backend.NodeName = nodeName
+					}
+				}
+				// If the endpoint is not ready, check whether it is serving and terminating.
+				if !isReady && option.Config.EnableK8sTerminatingEndpoint &&
+					isServing && isTerminating {
+					log.Debugf("Endpoint address %s on EndpointSlice %s is Terminating", addr, ep.Name)
+					backend.Terminating = true
+					metrics.TerminatingEndpointsEvents.Inc()
+				}
+			}
+
+			for _, port := range ep.Ports {
+				name, lbPort := parseEndpointPortV1(port)
+				if lbPort != nil {
+					backend.Ports[name] = lbPort
+				}
+			}
+			if sub.Hints != nil && (*sub.Hints).ForZones != nil {
+				hints := (*sub.Hints).ForZones
+				backend.HintsForZones = make([]string, len(hints))
+				for i, hint := range hints {
+					backend.HintsForZones[i] = hint.Name
+				}
+			}
+		}
+	}
+
+	log.Debugf("EndpointSlice %s has %d backends", ep.Name, len(endpoints.Backends))
+	return endpoints
+}
+
+// parseEndpointPortV1 returns the port name and the port parsed as an L4Addr
+// from the given port.
+func parseEndpointPortV1(port slim_discovery_v1.EndpointPort) (string, *loadbalancer.L4Addr) {
+	proto := loadbalancer.TCP
+	if port.Protocol != nil {
+		switch *port.Protocol {
+		case slim_corev1.ProtocolTCP:
+			proto = loadbalancer.TCP
+		case slim_corev1.ProtocolUDP:
+			proto = loadbalancer.UDP
+		case slim_corev1.ProtocolSCTP:
+			proto = loadbalancer.SCTP
+		default:
+			return "", nil
+		}
+	}
+	if port.Port == nil {
+		return "", nil
+	}
+	var name string
+	if port.Name != nil {
+		name = *port.Name
+	}
+	lbPort := loadbalancer.NewL4Addr(proto, uint16(*port.Port))
+	return name, lbPort
+}
+
+// EndpointSlices is the collection of all endpoint slices of a service.
+// The map key is the name of the endpoint slice or the name of the legacy
+// v1.Endpoint.
+// The endpoints stored here are not namespaced, since this structure is only
+// used as a value of another map that is already namespaced
+// (see ServiceCache.endpoints).
+//
+// +deepequal-gen=true
+type EndpointSlices struct {
+	epSlices map[string]*Endpoints
+}
+
+// newEndpointsSlices returns a new EndpointSlices.
+func newEndpointsSlices() *EndpointSlices {
+	return &EndpointSlices{
+		epSlices: map[string]*Endpoints{},
+	}
+}
+
+// GetEndpoints returns a single read-only *Endpoints structure with all
+// Endpoints' backends joined.
+func (es *EndpointSlices) GetEndpoints() *Endpoints {
+	if es == nil || len(es.epSlices) == 0 {
+		return nil
+	}
+	allEps := newEndpoints()
+	for _, eps := range es.epSlices {
+		for backend, ep := range eps.Backends {
+			// EndpointSlices may have duplicate addresses on different slices.
+			// kubectl get endpointslices -n endpointslicemirroring-4896
+			// NAME                             ADDRESSTYPE   PORTS   ENDPOINTS     AGE
+			// example-custom-endpoints-f6z84   IPv4          9090    10.244.1.49   28s
+			// example-custom-endpoints-g6r6v   IPv4          8090    10.244.1.49   28s
+			b, ok := allEps.Backends[backend]
+			if !ok {
+				allEps.Backends[backend] = ep.DeepCopy()
+			} else {
+				clone := b.DeepCopy()
+				for k, v := range ep.Ports {
+					clone.Ports[k] = v
+				}
+				allEps.Backends[backend] = clone
+			}
+		}
+	}
+	return allEps
+}
+
+// Upsert maps 'esName' to 'e'.
+// - 'esName': name of the EndpointSlice
+// - 'e': Endpoints to store in the map
+func (es *EndpointSlices) Upsert(esName string, e *Endpoints) {
+	if es == nil {
+		panic("BUG: EndpointSlices is nil")
+	}
+	es.epSlices[esName] = e
+}
+
+// Delete deletes the endpoint slice in the internal map. Returns true if no
+// endpoints remain in the map.
+func (es *EndpointSlices) Delete(esName string) bool {
+	if es == nil || len(es.epSlices) == 0 {
+		return true
+	}
+	delete(es.epSlices, esName)
+	return len(es.epSlices) == 0
+}
+
+// externalEndpoints is the collection of external endpoints in all remote
+// clusters. The map key is the name of the remote cluster.
+type externalEndpoints struct {
+	endpoints map[string]*Endpoints
+}
+
+// newExternalEndpoints returns a new externalEndpoints.
+func newExternalEndpoints() externalEndpoints {
+	return externalEndpoints{
+		endpoints: map[string]*Endpoints{},
+	}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/error_helpers.go b/vendor/github.com/cilium/cilium/pkg/k8s/error_helpers.go
new file mode 100644
index 0000000000..8c56f6baff
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/error_helpers.go
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package k8s
+
+import (
+	"strings"
+
+	"github.com/cilium/cilium/pkg/lock"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+var (
+	// k8sErrMsgMU guards additions and removals to k8sErrMsg, which stores a
+	// time after which a repeat error message can be printed.
+	k8sErrMsgMU lock.Mutex
+	k8sErrMsg   = map[string]time.Time{}
+)
+
+const k8sErrLogTimeout = time.Minute
+
+// k8sErrorUpdateCheckUnmuteTime returns a boolean indicating whether we should
+// log errmsg or not. It manages a once-per-k8sErrLogTimeout entry in k8sErrMsg.
+// When errmsg is new, or more than k8sErrLogTimeout has passed since the last
+// invocation that returned true, it returns true.
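+//
+// A minimal usage sketch (the error string and timestamps are hypothetical):
+//
+//	now := time.Now()
+//	k8sErrorUpdateCheckUnmuteTime("connection refused", now)                    // true: first occurrence, log it
+//	k8sErrorUpdateCheckUnmuteTime("connection refused", now.Add(time.Second))   // false: muted until the deadline
+//	k8sErrorUpdateCheckUnmuteTime("connection refused", now.Add(2*time.Minute)) // true: k8sErrLogTimeout elapsed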
+func k8sErrorUpdateCheckUnmuteTime(errstr string, now time.Time) bool { + k8sErrMsgMU.Lock() + defer k8sErrMsgMU.Unlock() + + if unmuteDeadline, ok := k8sErrMsg[errstr]; !ok || now.After(unmuteDeadline) { + k8sErrMsg[errstr] = now.Add(k8sErrLogTimeout) + return true + } + + return false +} + +// K8sErrorHandler handles the error messages in a non verbose way by omitting +// repeated instances of the same error message for a timeout defined with +// k8sErrLogTimeout. +func K8sErrorHandler(e error) { + if e == nil { + return + } + + // We rate-limit certain categories of error message. These are matched + // below, with a default behaviour to print everything else without + // rate-limiting. + // Note: We also have side-effects in some of the special cases. + now := time.Now() + errstr := e.Error() + switch { + // This can occur when cilium comes up before the k8s API server, and keeps + // trying to connect. + case strings.Contains(errstr, "connection refused"): + if k8sErrorUpdateCheckUnmuteTime(errstr, now) { + log.WithError(e).Error("k8sError") + } + + // k8s does not allow us to watch both ThirdPartyResource and + // CustomResourceDefinition. This would occur when a user mixes these within + // the k8s cluster, and might occur when upgrading from versions of cilium + // that used ThirdPartyResource to define CiliumNetworkPolicy. + case strings.Contains(errstr, "Failed to list *v2.CiliumNetworkPolicy: the server could not find the requested resource"): + if k8sErrorUpdateCheckUnmuteTime(errstr, now) { + log.WithError(e).Error("No Cilium Network Policy CRD defined in the cluster, please set `--skip-crd-creation=false` to avoid seeing this error.") + } + + // fromCIDR and toCIDR used to expect an "ip" subfield (so, they were a YAML + // map with one field) but common usage and expectation would simply list the + // CIDR ranges and IPs desired as a YAML list. In these cases we would see + // this decode error. We have since changed the definition to be a simple + // list of strings. 
+ case strings.Contains(errstr, "Unable to decode an event from the watch stream: unable to decode watch event"), + strings.Contains(errstr, "Failed to list *v1.CiliumNetworkPolicy: only encoded map or array can be decoded into a struct"), + strings.Contains(errstr, "Failed to list *v2.CiliumNetworkPolicy: only encoded map or array can be decoded into a struct"), + strings.Contains(errstr, "Failed to list *v2.CiliumNetworkPolicy: v2.CiliumNetworkPolicyList:"): + if k8sErrorUpdateCheckUnmuteTime(errstr, now) { + log.WithError(e).Error("Unable to decode k8s watch event") + } + + default: + log.WithError(e).Error("k8sError") + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/factory_functions.go b/vendor/github.com/cilium/cilium/pkg/k8s/factory_functions.go new file mode 100644 index 0000000000..a1888db475 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/factory_functions.go @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package k8s + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/client-go/tools/cache" + + "github.com/cilium/cilium/pkg/comparator" + dpTypes "github.com/cilium/cilium/pkg/datapath/types" + cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + cilium_v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + "github.com/cilium/cilium/pkg/k8s/types" + "github.com/cilium/cilium/pkg/logging/logfields" +) + +// CastInformerEvent tries to cast obj to type typ, directly +// or by DeletedFinalStateUnknown type. It returns nil and logs +// an error if obj doesn't contain type typ. +func CastInformerEvent[typ any](obj interface{}) *typ { + k8sObj, ok := obj.(*typ) + if ok { + return k8sObj + } + deletedObj, ok := obj.(cache.DeletedFinalStateUnknown) + if ok { + // Delete was not observed by the watcher but is + // removed from kube-apiserver. This is the last + // known state and the object no longer exists. + k8sObj, ok := deletedObj.Obj.(*typ) + if ok { + return k8sObj + } + } + log.WithField(logfields.Object, logfields.Repr(obj)). + Warnf("Ignoring invalid type, expected: %T", new(typ)) + return nil +} + +func EqualV1Services(k8sSVC1, k8sSVC2 *slim_corev1.Service, nodeAddressing dpTypes.NodeAddressing) bool { + // Service annotations are used to mark services as global, shared, etc. + if !comparator.MapStringEquals(k8sSVC1.GetAnnotations(), k8sSVC2.GetAnnotations()) { + return false + } + + svcID1, svc1 := ParseService(k8sSVC1, nodeAddressing) + svcID2, svc2 := ParseService(k8sSVC2, nodeAddressing) + + if svcID1 != svcID2 { + return false + } + + // Please write all the equalness logic inside the K8sServiceInfo.Equals() + // method. + return svc1.DeepEqual(svc2) +} + +// AnnotationsEqual returns whether the annotation with any key in +// relevantAnnotations is equal in anno1 and anno2. 
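+//
+// For example (keys and values are hypothetical):
+//
+//	a1 := map[string]string{"service.cilium.io/global": "true", "other": "x"}
+//	a2 := map[string]string{"service.cilium.io/global": "true", "other": "y"}
+//	AnnotationsEqual([]string{"service.cilium.io/global"}, a1, a2) // true: only the relevant key is compared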
+func AnnotationsEqual(relevantAnnotations []string, anno1, anno2 map[string]string) bool { + for _, an := range relevantAnnotations { + if anno1[an] != anno2[an] { + return false + } + } + return true +} + +func convertToK8sServicePorts(ports []v1.ServicePort) []slim_corev1.ServicePort { + if ports == nil { + return nil + } + + slimPorts := make([]slim_corev1.ServicePort, 0, len(ports)) + for _, v1Port := range ports { + slimPorts = append(slimPorts, + slim_corev1.ServicePort{ + Name: v1Port.Name, + Protocol: slim_corev1.Protocol(v1Port.Protocol), + Port: v1Port.Port, + NodePort: v1Port.NodePort, + }, + ) + } + return slimPorts +} + +func ConvertToK8sV1ServicePorts(slimPorts []slim_corev1.ServicePort) []v1.ServicePort { + if slimPorts == nil { + return nil + } + + ports := make([]v1.ServicePort, 0, len(slimPorts)) + for _, port := range slimPorts { + ports = append(ports, + v1.ServicePort{ + Name: port.Name, + Protocol: v1.Protocol(port.Protocol), + Port: port.Port, + NodePort: port.NodePort, + }, + ) + } + return ports +} + +func convertToK8sServiceAffinityConfig(saCfg *v1.SessionAffinityConfig) *slim_corev1.SessionAffinityConfig { + if saCfg == nil { + return nil + } + + if saCfg.ClientIP == nil { + return &slim_corev1.SessionAffinityConfig{} + } + + return &slim_corev1.SessionAffinityConfig{ + ClientIP: &slim_corev1.ClientIPConfig{ + TimeoutSeconds: saCfg.ClientIP.TimeoutSeconds, + }, + } +} + +func ConvertToK8sV1ServiceAffinityConfig(saCfg *slim_corev1.SessionAffinityConfig) *v1.SessionAffinityConfig { + if saCfg == nil { + return nil + } + + if saCfg.ClientIP == nil { + return &v1.SessionAffinityConfig{} + } + + return &v1.SessionAffinityConfig{ + ClientIP: &v1.ClientIPConfig{ + TimeoutSeconds: saCfg.ClientIP.TimeoutSeconds, + }, + } +} + +func convertToK8sLoadBalancerIngress(lbIngs []v1.LoadBalancerIngress) []slim_corev1.LoadBalancerIngress { + if lbIngs == nil { + return nil + } + + slimLBIngs := make([]slim_corev1.LoadBalancerIngress, 0, len(lbIngs)) + for _, lbIng := range lbIngs { + slimLBIngs = append(slimLBIngs, + slim_corev1.LoadBalancerIngress{ + IP: lbIng.IP, + }, + ) + } + return slimLBIngs +} + +func ConvertToK8sV1LoadBalancerIngress(slimLBIngs []slim_corev1.LoadBalancerIngress) []v1.LoadBalancerIngress { + if slimLBIngs == nil { + return nil + } + + lbIngs := make([]v1.LoadBalancerIngress, 0, len(slimLBIngs)) + for _, lbIng := range slimLBIngs { + var ports []v1.PortStatus + for _, port := range lbIng.Ports { + ports = append(ports, v1.PortStatus{ + Port: port.Port, + Protocol: v1.Protocol(port.Protocol), + Error: port.Error, + }) + } + lbIngs = append(lbIngs, + v1.LoadBalancerIngress{ + IP: lbIng.IP, + Hostname: lbIng.Hostname, + Ports: ports, + }, + ) + } + return lbIngs +} + +func ConvertToNetworkV1IngressLoadBalancerIngress(slimLBIngs []slim_corev1.LoadBalancerIngress) []networkingv1.IngressLoadBalancerIngress { + if slimLBIngs == nil { + return nil + } + + ingLBIngs := make([]networkingv1.IngressLoadBalancerIngress, 0, len(slimLBIngs)) + for _, lbIng := range slimLBIngs { + ports := make([]networkingv1.IngressPortStatus, 0, len(lbIng.Ports)) + for _, port := range lbIng.Ports { + ports = append(ports, networkingv1.IngressPortStatus{ + Port: port.Port, + Protocol: v1.Protocol(port.Protocol), + Error: port.Error, + }) + } + ingLBIngs = append(ingLBIngs, + networkingv1.IngressLoadBalancerIngress{ + IP: lbIng.IP, + Hostname: lbIng.Hostname, + Ports: ports, + }) + } + return ingLBIngs +} + +// TransformToK8sService transforms a *v1.Service into a *slim_corev1.Service +// 
or a cache.DeletedFinalStateUnknown into a cache.DeletedFinalStateUnknown +// with a *slim_corev1.Service in its Obj. If obj is a *slim_corev1.Service +// or a cache.DeletedFinalStateUnknown with a *slim_corev1.Service in its Obj, +// obj is returned without any transformations. If the given obj can't be cast +// into either *slim_corev1.Service nor cache.DeletedFinalStateUnknown, an error +// is returned. +func TransformToK8sService(obj interface{}) (interface{}, error) { + switch concreteObj := obj.(type) { + case *v1.Service: + return &slim_corev1.Service{ + TypeMeta: slim_metav1.TypeMeta{ + Kind: concreteObj.TypeMeta.Kind, + APIVersion: concreteObj.TypeMeta.APIVersion, + }, + ObjectMeta: slim_metav1.ObjectMeta{ + Name: concreteObj.ObjectMeta.Name, + Namespace: concreteObj.ObjectMeta.Namespace, + ResourceVersion: concreteObj.ObjectMeta.ResourceVersion, + UID: concreteObj.ObjectMeta.UID, + Labels: concreteObj.ObjectMeta.Labels, + Annotations: concreteObj.ObjectMeta.Annotations, + }, + Spec: slim_corev1.ServiceSpec{ + Ports: convertToK8sServicePorts(concreteObj.Spec.Ports), + Selector: concreteObj.Spec.Selector, + ClusterIP: concreteObj.Spec.ClusterIP, + Type: slim_corev1.ServiceType(concreteObj.Spec.Type), + ExternalIPs: concreteObj.Spec.ExternalIPs, + SessionAffinity: slim_corev1.ServiceAffinity(concreteObj.Spec.SessionAffinity), + LoadBalancerIP: concreteObj.Spec.LoadBalancerIP, + ExternalTrafficPolicy: slim_corev1.ServiceExternalTrafficPolicyType(concreteObj.Spec.ExternalTrafficPolicy), + HealthCheckNodePort: concreteObj.Spec.HealthCheckNodePort, + SessionAffinityConfig: convertToK8sServiceAffinityConfig(concreteObj.Spec.SessionAffinityConfig), + }, + Status: slim_corev1.ServiceStatus{ + LoadBalancer: slim_corev1.LoadBalancerStatus{ + Ingress: convertToK8sLoadBalancerIngress(concreteObj.Status.LoadBalancer.Ingress), + }, + }, + }, nil + case *slim_corev1.Service: + return obj, nil + case cache.DeletedFinalStateUnknown: + if _, ok := concreteObj.Obj.(*slim_corev1.Service); ok { + return obj, nil + } + svc, ok := concreteObj.Obj.(*v1.Service) + if !ok { + return nil, fmt.Errorf("unknown object type %T", concreteObj.Obj) + } + return cache.DeletedFinalStateUnknown{ + Key: concreteObj.Key, + Obj: &slim_corev1.Service{ + TypeMeta: slim_metav1.TypeMeta{ + Kind: svc.TypeMeta.Kind, + APIVersion: svc.TypeMeta.APIVersion, + }, + ObjectMeta: slim_metav1.ObjectMeta{ + Name: svc.ObjectMeta.Name, + Namespace: svc.ObjectMeta.Namespace, + ResourceVersion: svc.ObjectMeta.ResourceVersion, + UID: svc.ObjectMeta.UID, + Labels: svc.ObjectMeta.Labels, + Annotations: svc.ObjectMeta.Annotations, + }, + Spec: slim_corev1.ServiceSpec{ + Ports: convertToK8sServicePorts(svc.Spec.Ports), + Selector: svc.Spec.Selector, + ClusterIP: svc.Spec.ClusterIP, + Type: slim_corev1.ServiceType(svc.Spec.Type), + ExternalIPs: svc.Spec.ExternalIPs, + SessionAffinity: slim_corev1.ServiceAffinity(svc.Spec.SessionAffinity), + LoadBalancerIP: svc.Spec.LoadBalancerIP, + ExternalTrafficPolicy: slim_corev1.ServiceExternalTrafficPolicyType(svc.Spec.ExternalTrafficPolicy), + HealthCheckNodePort: svc.Spec.HealthCheckNodePort, + SessionAffinityConfig: convertToK8sServiceAffinityConfig(svc.Spec.SessionAffinityConfig), + }, + Status: slim_corev1.ServiceStatus{ + LoadBalancer: slim_corev1.LoadBalancerStatus{ + Ingress: convertToK8sLoadBalancerIngress(svc.Status.LoadBalancer.Ingress), + }, + }, + }, + }, nil + default: + return nil, fmt.Errorf("unknown object type %T", concreteObj) + } +} + +// TransformToCCNP transforms a 
*cilium_v2.CiliumClusterwideNetworkPolicy into a +// *types.SlimCNP without the Status field of the given CNP, or a +// cache.DeletedFinalStateUnknown into a cache.DeletedFinalStateUnknown with a +// *types.SlimCNP, also without the Status field of the given CNP, in its Obj. +// If obj is a *types.SlimCNP or a cache.DeletedFinalStateUnknown with a *types.SlimCNP +// in its Obj, obj is returned without any transformations. If the given obj can't be +// cast into either *cilium_v2.CiliumClusterwideNetworkPolicy nor +// cache.DeletedFinalStateUnknown, an error is returned. +func TransformToCCNP(obj interface{}) (interface{}, error) { + switch concreteObj := obj.(type) { + case *cilium_v2.CiliumClusterwideNetworkPolicy: + return &types.SlimCNP{ + CiliumNetworkPolicy: &cilium_v2.CiliumNetworkPolicy{ + TypeMeta: concreteObj.TypeMeta, + ObjectMeta: concreteObj.ObjectMeta, + Spec: concreteObj.Spec, + Specs: concreteObj.Specs, + }, + }, nil + case *types.SlimCNP: + return obj, nil + case cache.DeletedFinalStateUnknown: + if _, ok := concreteObj.Obj.(*types.SlimCNP); ok { + return obj, nil + } + ccnp, ok := concreteObj.Obj.(*cilium_v2.CiliumClusterwideNetworkPolicy) + if !ok { + return nil, fmt.Errorf("unknown object type %T", concreteObj.Obj) + } + slimCNP := &types.SlimCNP{ + CiliumNetworkPolicy: &cilium_v2.CiliumNetworkPolicy{ + TypeMeta: ccnp.TypeMeta, + ObjectMeta: ccnp.ObjectMeta, + Spec: ccnp.Spec, + Specs: ccnp.Specs, + }, + } + dfsu := cache.DeletedFinalStateUnknown{ + Key: concreteObj.Key, + Obj: slimCNP, + } + return dfsu, nil + + default: + return nil, fmt.Errorf("unknown object type %T", concreteObj) + } +} + +// TransformToCNP transforms a *cilium_v2.CiliumNetworkPolicy into a +// *types.SlimCNP without the Status field of the given CNP, or a +// cache.DeletedFinalStateUnknown into a cache.DeletedFinalStateUnknown with a +// *types.SlimCNP, also without the Status field of the given CNP, in its Obj. +// If obj is a *types.SlimCNP or a cache.DeletedFinalStateUnknown with a +// *types.SlimCNP in its Obj, obj is returned without any transformations. +// If the given obj can't be cast into either *cilium_v2.CiliumNetworkPolicy +// nor cache.DeletedFinalStateUnknown, an error is returned. 
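+//
+// Since the signature matches cache.TransformFunc, one plausible wiring (a
+// sketch under that assumption; lw and handlers are placeholder variables) is
+// to pass it as the transformer of an informer so that objects are slimmed
+// before they reach the local store:
+//
+//	store, ctrl := informer.NewInformer(lw, &cilium_v2.CiliumNetworkPolicy{}, 0, handlers, TransformToCNP)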
+func TransformToCNP(obj interface{}) (interface{}, error) { + switch concreteObj := obj.(type) { + case *cilium_v2.CiliumNetworkPolicy: + return &types.SlimCNP{ + CiliumNetworkPolicy: &cilium_v2.CiliumNetworkPolicy{ + TypeMeta: concreteObj.TypeMeta, + ObjectMeta: concreteObj.ObjectMeta, + Spec: concreteObj.Spec, + Specs: concreteObj.Specs, + }, + }, nil + case *types.SlimCNP: + return obj, nil + case cache.DeletedFinalStateUnknown: + if _, ok := concreteObj.Obj.(*types.SlimCNP); ok { + return obj, nil + } + cnp, ok := concreteObj.Obj.(*cilium_v2.CiliumNetworkPolicy) + if !ok { + return nil, fmt.Errorf("unknown object type %T", concreteObj.Obj) + } + return cache.DeletedFinalStateUnknown{ + Key: concreteObj.Key, + Obj: &types.SlimCNP{ + CiliumNetworkPolicy: &cilium_v2.CiliumNetworkPolicy{ + TypeMeta: cnp.TypeMeta, + ObjectMeta: cnp.ObjectMeta, + Spec: cnp.Spec, + Specs: cnp.Specs, + }, + }, + }, nil + default: + return nil, fmt.Errorf("unknown object type %T", concreteObj) + } +} + +func convertToAddress(v1Addrs []v1.NodeAddress) []slim_corev1.NodeAddress { + if v1Addrs == nil { + return nil + } + + addrs := make([]slim_corev1.NodeAddress, 0, len(v1Addrs)) + for _, addr := range v1Addrs { + addrs = append( + addrs, + slim_corev1.NodeAddress{ + Type: slim_corev1.NodeAddressType(addr.Type), + Address: addr.Address, + }, + ) + } + return addrs +} + +func convertToTaints(v1Taints []v1.Taint) []slim_corev1.Taint { + if v1Taints == nil { + return nil + } + + taints := make([]slim_corev1.Taint, 0, len(v1Taints)) + for _, taint := range v1Taints { + var ta *slim_metav1.Time + if taint.TimeAdded != nil { + t := slim_metav1.NewTime(taint.TimeAdded.Time) + ta = &t + } + taints = append( + taints, + slim_corev1.Taint{ + Key: taint.Key, + Value: taint.Value, + Effect: slim_corev1.TaintEffect(taint.Effect), + TimeAdded: ta, + }, + ) + } + return taints +} + +// TransformToCiliumEndpoint transforms a *cilium_v2.CiliumEndpoint into a +// *types.CiliumEndpoint or a cache.DeletedFinalStateUnknown into a +// cache.DeletedFinalStateUnknown with a *types.CiliumEndpoint in its Obj. +// If obj is a *types.CiliumEndpoint or a cache.DeletedFinalStateUnknown with +// a *types.CiliumEndpoint in its Obj, obj is returned without any transformations. +// If the given obj can't be cast into either *cilium_v2.CiliumEndpoint nor +// cache.DeletedFinalStateUnknown, an error is returned. +func TransformToCiliumEndpoint(obj interface{}) (interface{}, error) { + switch concreteObj := obj.(type) { + case *cilium_v2.CiliumEndpoint: + return &types.CiliumEndpoint{ + TypeMeta: slim_metav1.TypeMeta{ + Kind: concreteObj.TypeMeta.Kind, + APIVersion: concreteObj.TypeMeta.APIVersion, + }, + ObjectMeta: slim_metav1.ObjectMeta{ + Name: concreteObj.ObjectMeta.Name, + Namespace: concreteObj.ObjectMeta.Namespace, + UID: concreteObj.ObjectMeta.UID, + ResourceVersion: concreteObj.ObjectMeta.ResourceVersion, + // We don't need to store labels nor annotations because + // they are not used by the CEP handlers. 
+				Labels:      nil,
+				Annotations: nil,
+			},
+			Encryption: func() *cilium_v2.EncryptionSpec {
+				enc := concreteObj.Status.Encryption
+				return &enc
+			}(),
+			Identity:   concreteObj.Status.Identity,
+			Networking: concreteObj.Status.Networking,
+			NamedPorts: concreteObj.Status.NamedPorts,
+		}, nil
+	case *types.CiliumEndpoint:
+		return obj, nil
+	case cache.DeletedFinalStateUnknown:
+		if _, ok := concreteObj.Obj.(*types.CiliumEndpoint); ok {
+			return obj, nil
+		}
+		ciliumEndpoint, ok := concreteObj.Obj.(*cilium_v2.CiliumEndpoint)
+		if !ok {
+			return nil, fmt.Errorf("unknown object type %T", concreteObj.Obj)
+		}
+		return cache.DeletedFinalStateUnknown{
+			Key: concreteObj.Key,
+			Obj: &types.CiliumEndpoint{
+				TypeMeta: slim_metav1.TypeMeta{
+					Kind:       ciliumEndpoint.TypeMeta.Kind,
+					APIVersion: ciliumEndpoint.TypeMeta.APIVersion,
+				},
+				ObjectMeta: slim_metav1.ObjectMeta{
+					Name:            ciliumEndpoint.ObjectMeta.Name,
+					Namespace:       ciliumEndpoint.ObjectMeta.Namespace,
+					UID:             ciliumEndpoint.ObjectMeta.UID,
+					ResourceVersion: ciliumEndpoint.ObjectMeta.ResourceVersion,
+					// We don't need to store labels nor annotations because
+					// they are not used by the CEP handlers.
+					Labels:      nil,
+					Annotations: nil,
+				},
+				Encryption: func() *cilium_v2.EncryptionSpec {
+					enc := ciliumEndpoint.Status.Encryption
+					return &enc
+				}(),
+				Identity:   ciliumEndpoint.Status.Identity,
+				Networking: ciliumEndpoint.Status.Networking,
+				NamedPorts: ciliumEndpoint.Status.NamedPorts,
+			},
+		}, nil
+	default:
+		return nil, fmt.Errorf("unknown object type %T", concreteObj)
+	}
+}
+
+// ConvertCEPToCoreCEP converts a CiliumEndpoint to a CoreCiliumEndpoint,
+// containing only the minimal set of fields needed to represent the endpoint.
+func ConvertCEPToCoreCEP(cep *cilium_v2.CiliumEndpoint) *cilium_v2alpha1.CoreCiliumEndpoint {
+	// Copy the Networking field into the core CEP.
+	var epNetworking *cilium_v2.EndpointNetworking
+	if cep.Status.Networking != nil {
+		epNetworking = new(cilium_v2.EndpointNetworking)
+		cep.Status.Networking.DeepCopyInto(epNetworking)
+	}
+	var identityID int64 = 0
+	if cep.Status.Identity != nil {
+		identityID = cep.Status.Identity.ID
+	}
+	return &cilium_v2alpha1.CoreCiliumEndpoint{
+		Name:       cep.GetName(),
+		Networking: epNetworking,
+		Encryption: cep.Status.Encryption,
+		IdentityID: identityID,
+		NamedPorts: cep.Status.NamedPorts.DeepCopy(),
+	}
+}
+
+// ConvertCoreCiliumEndpointToTypesCiliumEndpoint converts a CoreCiliumEndpoint
+// object into a types.CiliumEndpoint.
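+//
+// A CoreCiliumEndpoint (for example, one batched inside a CiliumEndpointSlice)
+// carries no namespace of its own, so the caller supplies one; a sketch with a
+// hypothetical namespace:
+//
+//	cep := ConvertCoreCiliumEndpointToTypesCiliumEndpoint(&ccep, "kube-system")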
+func ConvertCoreCiliumEndpointToTypesCiliumEndpoint(ccep *cilium_v2alpha1.CoreCiliumEndpoint, ns string) *types.CiliumEndpoint { + return &types.CiliumEndpoint{ + ObjectMeta: slim_metav1.ObjectMeta{ + Name: ccep.Name, + Namespace: ns, + }, + Encryption: func() *cilium_v2.EncryptionSpec { + enc := ccep.Encryption + return &enc + }(), + Identity: &cilium_v2.EndpointIdentity{ + ID: ccep.IdentityID, + }, + Networking: ccep.Networking, + NamedPorts: ccep.NamedPorts, + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/identitybackend/identity.go b/vendor/github.com/cilium/cilium/pkg/k8s/identitybackend/identity.go new file mode 100644 index 0000000000..9386a68eb6 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/identitybackend/identity.go @@ -0,0 +1,416 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package identitybackend + +import ( + "context" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" + + "github.com/cilium/cilium/pkg/allocator" + cacheKey "github.com/cilium/cilium/pkg/identity/key" + "github.com/cilium/cilium/pkg/idpool" + k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io" + v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + "github.com/cilium/cilium/pkg/k8s/informer" + k8sUtils "github.com/cilium/cilium/pkg/k8s/utils" + "github.com/cilium/cilium/pkg/kvstore" + "github.com/cilium/cilium/pkg/labels" + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/rate" +) + +var ( + log = logging.DefaultLogger.WithField(logfields.LogSubsys, "crd-allocator") +) + +const ( + // HeartBeatAnnotation is an annotation applied by the operator to indicate + // that a CiliumIdentity has been marked for deletion. + HeartBeatAnnotation = "io.cilium.heartbeat" + + k8sPrefix = labels.LabelSourceK8s + ":" + k8sNamespaceLabelPrefix = labels.LabelSourceK8s + ":" + k8sConst.PodNamespaceMetaLabels + labels.PathDelimiter + + // byKeyIndex is the name of the index of the identities by key. + byKeyIndex = "by-key-index" +) + +func NewCRDBackend(c CRDBackendConfiguration) (allocator.Backend, error) { + return &crdBackend{CRDBackendConfiguration: c}, nil +} + +type CRDBackendConfiguration struct { + Store cache.Indexer + Client clientset.Interface + KeyFunc func(map[string]string) allocator.AllocatorKey +} + +type crdBackend struct { + CRDBackendConfiguration +} + +func (c *crdBackend) DeleteAllKeys(ctx context.Context) { +} + +// sanitizeK8sLabels strips the 'k8s:' prefix in the labels generated by +// AllocatorKey.GetAsMap (when the key is k8s labels). In the CRD identity case +// we map the labels directly to the ciliumidentity CRD instance, and +// kubernetes does not allow ':' in the name of the label. These labels are not +// the canonical labels of the identity, but used to ease interaction with the +// CRD object. +func sanitizeK8sLabels(old map[string]string) (selected, skipped map[string]string) { + skipped = make(map[string]string, len(old)) + selected = make(map[string]string, len(old)) + for k, v := range old { + // Skip non-k8s labels. 
+ // Skip synthesized labels for k8s namespace labels, since they contain user input which can result in the label + // name being longer than 63 characters. + if !strings.HasPrefix(k, k8sPrefix) || strings.HasPrefix(k, k8sNamespaceLabelPrefix) { + skipped[k] = v + continue // skip non-k8s labels + } + k = strings.TrimPrefix(k, k8sPrefix) // k8s: is redundant + selected[k] = v + } + return selected, skipped +} + +// AllocateID will create an identity CRD, thus creating the identity for this +// key-> ID mapping. +// Note: the lock field is not supported with the k8s CRD allocator. +// Returns an allocator key with the cilium identity stored in it. +func (c *crdBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (allocator.AllocatorKey, error) { + selectedLabels, skippedLabels := sanitizeK8sLabels(key.GetAsMap()) + log.WithField(logfields.Labels, skippedLabels).Info("Skipped non-kubernetes labels when labelling ciliumidentity. All labels will still be used in identity determination") + + identity := &v2.CiliumIdentity{ + ObjectMeta: metav1.ObjectMeta{ + Name: id.String(), + Labels: selectedLabels, + }, + SecurityLabels: key.GetAsMap(), + } + + ci, err := c.Client.CiliumV2().CiliumIdentities().Create(ctx, identity, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + return key.PutValue(cacheKey.MetadataKeyBackendKey, ci), nil +} + +func (c *crdBackend) AllocateIDIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) (allocator.AllocatorKey, error) { + return c.AllocateID(ctx, id, key) +} + +// AcquireReference acquires a reference to the identity. +func (c *crdBackend) AcquireReference(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error { + // For CiliumIdentity-based allocation, the reference counting is + // handled via CiliumEndpoint. Any CiliumEndpoint referring to a + // CiliumIdentity will keep the CiliumIdentity alive. However, + // there is a brief window where a CiliumEndpoint may not exist + // for a given CiliumIdentity (according to the operator), in + // which case the operator marks the CiliumIdentity for deletion. + // This checks to see if the CiliumIdentity has been marked for + // deletion and removes the mark so that the CiliumIdentity can + // be safely used. + // + // NOTE: A race against using a CiliumIdentity that might otherwise + // be (immediately) deleted is prevented by the operator logic that + // validates the ResourceVersion of the CiliumIdentity before deleting + // it. If a CiliumIdentity does (eventually) get deleted by the + // operator, the agent will then have a chance to recreate it. + var ( + ts string + ok bool + ) + // check to see if the cached copy of the identity + // has the annotation + ci, exists, err := c.getById(ctx, id) + if err != nil { + return err + } + if !exists { + // fall back to the key stored in the allocator key. If it's not present + // then return the error. 
+		ci, ok = key.Value(cacheKey.MetadataKeyBackendKey).(*v2.CiliumIdentity)
+		if !ok {
+			return fmt.Errorf("identity (id:%q,key:%q) does not exist", id, key)
+		}
+	}
+
+	ts, ok = ci.Annotations[HeartBeatAnnotation]
+	if ok {
+		log.WithField(logfields.Identity, ci).Infof("Identity marked for deletion (at %s); attempting to unmark it", ts)
+		ci = ci.DeepCopy()
+		delete(ci.Annotations, HeartBeatAnnotation)
+		_, err = c.Client.CiliumV2().CiliumIdentities().Update(ctx, ci, metav1.UpdateOptions{})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *crdBackend) RunLocksGC(_ context.Context, _ map[string]kvstore.Value) (map[string]kvstore.Value, error) {
+	return nil, nil
+}
+
+func (c *crdBackend) RunGC(context.Context, *rate.Limiter, map[string]uint64, idpool.ID, idpool.ID) (map[string]uint64, *allocator.GCStats, error) {
+	return nil, nil, nil
+}
+
+// UpdateKey refreshes the reference indicating that this node is using this
+// key->ID mapping. It assumes that the identity already exists but will
+// recreate it if reliablyMissing is true.
+// Note: the lock field is not supported with the k8s CRD allocator.
+func (c *crdBackend) UpdateKey(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool) error {
+	err := c.AcquireReference(ctx, id, key, nil)
+	if err == nil {
+		log.WithFields(logrus.Fields{
+			logfields.Identity: id,
+			logfields.Labels:   key,
+		}).Debug("Acquired reference for identity")
+		return nil
+	}
+
+	// The CRD (aka the master key) is missing. Try to recover by recreating it
+	// if reliablyMissing is set.
+	log.WithError(err).WithFields(logrus.Fields{
+		logfields.Identity: id,
+		logfields.Labels:   key,
+	}).Warning("Unable to update CRD identity information with a reference for this node")
+
+	if reliablyMissing {
+		// Recreate a missing master key
+		if _, err = c.AllocateID(ctx, id, key); err != nil {
+			return fmt.Errorf("unable to recreate missing CRD identity %q->%q: %s", key, id, err)
+		}
+
+		return nil
+	}
+
+	return err
+}
+
+func (c *crdBackend) UpdateKeyIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool, lock kvstore.KVLocker) error {
+	return c.UpdateKey(ctx, id, key, reliablyMissing)
+}
+
+// Lock does not return a lock object. Locking is not supported with the k8s
+// CRD allocator. It is here to meet interface requirements.
+func (c *crdBackend) Lock(ctx context.Context, key allocator.AllocatorKey) (kvstore.KVLocker, error) {
+	return &crdLock{}, nil
+}
+
+type crdLock struct{}
+
+// Unlock does not unlock a lock object. Locking is not supported with the k8s
+// CRD allocator. It is here to meet interface requirements.
+func (c *crdLock) Unlock(ctx context.Context) error {
+	return nil
+}
+
+// Comparator does nothing. Locking is not supported with the k8s
+// CRD allocator. It is here to meet interface requirements.
+func (c *crdLock) Comparator() interface{} {
+	return nil
+}
+
+// get returns the identity found for the given set of labels.
+// In the case of duplicate entries, return an identity entry
+// from a sorted list.
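+//
+// For instance, if two CiliumIdentity objects named "1000" and "1001"
+// (hypothetical names) carried identical SecurityLabels, the candidates are
+// ordered by CreationTimestamp and the oldest matching identity wins.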
+func (c *crdBackend) get(ctx context.Context, key allocator.AllocatorKey) *v2.CiliumIdentity { + if c.Store == nil { + return nil + } + + identities, err := c.Store.ByIndex(byKeyIndex, key.GetKey()) + if err != nil || len(identities) == 0 { + return nil + } + + sort.Slice(identities, func(i, j int) bool { + left, ok := identities[i].(*v2.CiliumIdentity) + if !ok { + return false + } + + right, ok := identities[j].(*v2.CiliumIdentity) + if !ok { + return false + } + + return left.CreationTimestamp.Before(&right.CreationTimestamp) + }) + + for _, identityObject := range identities { + identity, ok := identityObject.(*v2.CiliumIdentity) + if !ok { + return nil + } + + if reflect.DeepEqual(identity.SecurityLabels, key.GetAsMap()) { + return identity + } + } + return nil +} + +// Get returns the first ID which is allocated to a key in the identity CRDs in +// kubernetes. +// Note: the lock field is not supported with the k8s CRD allocator. +func (c *crdBackend) Get(ctx context.Context, key allocator.AllocatorKey) (idpool.ID, error) { + identity := c.get(ctx, key) + if identity == nil { + return idpool.NoID, nil + } + + id, err := strconv.ParseUint(identity.Name, 10, 64) + if err != nil { + return idpool.NoID, fmt.Errorf("unable to parse value '%s': %s", identity.Name, err) + } + + return idpool.ID(id), nil +} + +func (c *crdBackend) GetIfLocked(ctx context.Context, key allocator.AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error) { + return c.Get(ctx, key) +} + +// getById fetches the identities from the local store. Returns a nil `err` and +// false `exists` if an Identity is not found for the given `id`. +func (c *crdBackend) getById(ctx context.Context, id idpool.ID) (idty *v2.CiliumIdentity, exists bool, err error) { + if c.Store == nil { + return nil, false, fmt.Errorf("store is not available yet") + } + + identityTemplate := &v2.CiliumIdentity{ + ObjectMeta: metav1.ObjectMeta{ + Name: id.String(), + }, + } + + obj, exists, err := c.Store.Get(identityTemplate) + if err != nil { + return nil, exists, err + } + if !exists { + return nil, exists, nil + } + + identity, ok := obj.(*v2.CiliumIdentity) + if !ok { + return nil, false, fmt.Errorf("invalid object %T", obj) + } + return identity, true, nil +} + +// GetByID returns the key associated with an ID. Returns nil if no key is +// associated with the ID. +// Note: the lock field is not supported with the k8s CRD allocator. +func (c *crdBackend) GetByID(ctx context.Context, id idpool.ID) (allocator.AllocatorKey, error) { + identity, exists, err := c.getById(ctx, id) + if err != nil { + return nil, err + } + if !exists { + return nil, nil + } + + return c.KeyFunc(identity.SecurityLabels), nil +} + +// Release dissociates this node from using the identity bound to the given ID. +// When an identity has no references it may be garbage collected. +func (c *crdBackend) Release(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (err error) { + // For CiliumIdentity-based allocation, the reference counting is + // handled via CiliumEndpoint. Any CiliumEndpoint referring to a + // CiliumIdentity will keep the CiliumIdentity alive. No action is + // needed to release the reference here. 
+	return nil
+}
+
+func getIdentitiesByKeyFunc(keyFunc func(map[string]string) allocator.AllocatorKey) func(obj interface{}) ([]string, error) {
+	return func(obj interface{}) ([]string, error) {
+		if identity, ok := obj.(*v2.CiliumIdentity); ok {
+			return []string{keyFunc(identity.SecurityLabels).GetKey()}, nil
+		}
+		return []string{}, fmt.Errorf("object other than CiliumIdentity was pushed to the store")
+	}
+}
+
+func (c *crdBackend) ListAndWatch(ctx context.Context, handler allocator.CacheMutations, stopChan chan struct{}) {
+	c.Store = cache.NewIndexer(
+		cache.DeletionHandlingMetaNamespaceKeyFunc,
+		cache.Indexers{byKeyIndex: getIdentitiesByKeyFunc(c.KeyFunc)})
+	identityInformer := informer.NewInformerWithStore(
+		k8sUtils.ListerWatcherFromTyped[*v2.CiliumIdentityList](c.Client.CiliumV2().CiliumIdentities()),
+		&v2.CiliumIdentity{},
+		0,
+		cache.ResourceEventHandlerFuncs{
+			AddFunc: func(obj interface{}) {
+				if identity, ok := obj.(*v2.CiliumIdentity); ok {
+					if id, err := strconv.ParseUint(identity.Name, 10, 64); err == nil {
+						handler.OnAdd(idpool.ID(id), c.KeyFunc(identity.SecurityLabels))
+					}
+				}
+			},
+			UpdateFunc: func(oldObj, newObj interface{}) {
+				if oldIdentity, ok := oldObj.(*v2.CiliumIdentity); ok {
+					if newIdentity, ok := newObj.(*v2.CiliumIdentity); ok {
+						if oldIdentity.DeepEqual(newIdentity) {
+							return
+						}
+						if id, err := strconv.ParseUint(newIdentity.Name, 10, 64); err == nil {
+							handler.OnModify(idpool.ID(id), c.KeyFunc(newIdentity.SecurityLabels))
+						}
+					}
+				}
+			},
+			DeleteFunc: func(obj interface{}) {
+				// The delete event is sometimes for items with unknown state that are
+				// deleted anyway.
+				if deleteObj, isDeleteObj := obj.(cache.DeletedFinalStateUnknown); isDeleteObj {
+					obj = deleteObj.Obj
+				}
+
+				if identity, ok := obj.(*v2.CiliumIdentity); ok {
+					if id, err := strconv.ParseUint(identity.Name, 10, 64); err == nil {
+						handler.OnDelete(idpool.ID(id), c.KeyFunc(identity.SecurityLabels))
+					}
+				} else {
+					log.Debugf("Ignoring unknown delete event %#v", obj)
+				}
+			},
+		},
+		nil,
+		c.Store,
+	)
+
+	go func() {
+		if ok := cache.WaitForCacheSync(stopChan, identityInformer.HasSynced); ok {
+			handler.OnListDone()
+		}
+	}()
+
+	identityInformer.Run(stopChan)
+}
+
+func (c *crdBackend) Status() (string, error) {
+	return "OK", nil
+}
+
+func (c *crdBackend) Encode(v string) string {
+	return v
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/informer/informer.go b/vendor/github.com/cilium/cilium/pkg/k8s/informer/informer.go
new file mode 100644
index 0000000000..24680ce73d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/informer/informer.go
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package informer
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+
+	k8sRuntime "k8s.io/apimachinery/pkg/runtime"
+	utilRuntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/tools/cache"
+
+	"github.com/cilium/cilium/pkg/logging"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "k8s")
+
+func init() {
+	utilRuntime.PanicHandlers = append(
+		utilRuntime.PanicHandlers,
+		func(r interface{}) {
+			// from k8s library
+			if err, ok := r.(error); ok && errors.Is(err, http.ErrAbortHandler) {
+				// honor the http.ErrAbortHandler sentinel panic value:
+				// ErrAbortHandler is a sentinel panic value to abort a handler.
+				// While any panic from ServeHTTP aborts the response to the client,
+				// panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log.
+				return
+			}
+			log.Fatal("Panic in Kubernetes runtime handler")
+		},
+	)
+}
+
+type privateRunner struct {
+	cache.Controller
+	cacheMutationDetector cache.MutationDetector
+}
+
+func (p *privateRunner) Run(stopCh <-chan struct{}) {
+	go p.cacheMutationDetector.Run(stopCh)
+	p.Controller.Run(stopCh)
+}
+
+// NewInformer is a copy of k8s.io/client-go/tools/cache/NewInformer, but includes the default cache MutationDetector.
+func NewInformer(
+	lw cache.ListerWatcher,
+	objType k8sRuntime.Object,
+	resyncPeriod time.Duration,
+	h cache.ResourceEventHandler,
+	transformer cache.TransformFunc,
+) (cache.Store, cache.Controller) {
+	// This will hold the client state, as we know it.
+	clientState := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
+
+	return clientState, NewInformerWithStore(lw, objType, resyncPeriod, h, transformer, clientState)
+}
+
+// NewIndexerInformer is a copy of k8s.io/client-go/tools/cache/NewIndexerInformer, but includes the
+// default cache MutationDetector.
+func NewIndexerInformer(
+	lw cache.ListerWatcher,
+	objType k8sRuntime.Object,
+	resyncPeriod time.Duration,
+	h cache.ResourceEventHandler,
+	transformer cache.TransformFunc,
+	indexers cache.Indexers,
+) (cache.Indexer, cache.Controller) {
+	clientState := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, indexers)
+	return clientState, NewInformerWithStore(lw, objType, resyncPeriod, h, transformer, clientState)
+}
+
+// NewInformerWithStore uses the same arguments as NewInformer, but additionally
+// allows the caller to supply the cache.Store, and includes the default cache
+// MutationDetector.
+func NewInformerWithStore(
+	lw cache.ListerWatcher,
+	objType k8sRuntime.Object,
+	resyncPeriod time.Duration,
+	h cache.ResourceEventHandler,
+	transformer cache.TransformFunc,
+	clientState cache.Store,
+) cache.Controller {
+
+	// This will hold incoming changes. Note how we pass clientState in as a
+	// KeyLister, that way resync operations will result in the correct set
+	// of update/delete deltas.
+	opts := cache.DeltaFIFOOptions{KeyFunction: cache.MetaNamespaceKeyFunc, KnownObjects: clientState}
+	fifo := cache.NewDeltaFIFOWithOptions(opts)
+
+	cacheMutationDetector := cache.NewCacheMutationDetector(fmt.Sprintf("%T", objType))
+
+	cfg := &cache.Config{
+		Queue:            fifo,
+		ListerWatcher:    lw,
+		ObjectType:       objType,
+		FullResyncPeriod: resyncPeriod,
+		RetryOnError:     false,
+
+		Process: func(obj interface{}, isInInitialList bool) error {
+			// from oldest to newest
+			for _, d := range obj.(cache.Deltas) {
+
+				var obj interface{}
+				if transformer != nil {
+					var err error
+					if obj, err = transformer(d.Object); err != nil {
+						return err
+					}
+				} else {
+					obj = d.Object
+				}
+
+				// In CI we detect if the objects were modified and panic;
+				// this is a no-op in production environments.
+ cacheMutationDetector.AddObject(obj) + + switch d.Type { + case cache.Sync, cache.Added, cache.Updated: + if old, exists, err := clientState.Get(obj); err == nil && exists { + if err := clientState.Update(obj); err != nil { + return err + } + h.OnUpdate(old, obj) + } else { + if err := clientState.Add(obj); err != nil { + return err + } + h.OnAdd(obj, isInInitialList) + } + case cache.Deleted: + if err := clientState.Delete(obj); err != nil { + return err + } + h.OnDelete(obj) + } + } + return nil + }, + } + return &privateRunner{ + Controller: cache.New(cfg), + cacheMutationDetector: cacheMutationDetector, + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/json_patch.go b/vendor/github.com/cilium/cilium/pkg/k8s/json_patch.go new file mode 100644 index 0000000000..db58068796 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/json_patch.go @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package k8s + +const ( + // maximum number of operations a single json patch may contain. + // See https://github.com/kubernetes/kubernetes/pull/74000 + MaxJSONPatchOperations = 10000 +) + +// JSONPatch structure based on the RFC 6902 +type JSONPatch struct { + OP string `json:"op,omitempty"` + Path string `json:"path,omitempty"` + Value interface{} `json:"value"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/labels.go b/vendor/github.com/cilium/cilium/pkg/k8s/labels.go new file mode 100644 index 0000000000..1a49678727 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/labels.go @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package k8s + +import ( + "regexp" + "strings" + + "github.com/sirupsen/logrus" + + k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io" + slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + k8sUtils "github.com/cilium/cilium/pkg/k8s/utils" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/option" +) + +// UseOriginalSourceAddressLabel is the k8s label that can be added to a +// `CiliumEnvoyConfig`. This way the Cilium BPF Metadata listener filter is configured +// to use the original source address when extracting the metadata for a request. +const UseOriginalSourceAddressLabel = "cilium.io/use-original-source-address" + +const ( + // AnnotationIstioSidecarStatus is the annotation added by Istio into a pod + // when it is injected with a sidecar proxy. + // Since Istio 0.5.0, the value of this annotation is a serialized JSON object + // with the following structure ("imagePullSecrets" was added in Istio 0.8.0): + // + // { + // "version": "0213afe1274259d2f23feb4820ad2f8eb8609b84a5538e5f51f711545b6bde88", + // "initContainers": ["sleep", "istio-init"], + // "containers": ["istio-proxy"], + // "volumes": ["cilium-unix-sock-dir", "istio-envoy", "istio-certs"], + // "imagePullSecrets": null + // } + AnnotationIstioSidecarStatus = "sidecar.istio.io/status" + + // DefaultSidecarIstioProxyImageRegexp is the default regexp compiled into + // SidecarIstioProxyImageRegexp. + DefaultSidecarIstioProxyImageRegexp = "cilium/istio_proxy" +) + +// SidecarIstioProxyImageRegexp is the regular expression matching +// compatible Istio sidecar istio-proxy container image names. +// This is set by the "sidecar-istio-proxy-image" configuration flag. 
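+//
+// With the default pattern, an image reference such as
+// "docker.io/cilium/istio_proxy:1.10" (hypothetical tag) matches, since
+// MatchString performs an unanchored substring search.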
+var SidecarIstioProxyImageRegexp = regexp.MustCompile(DefaultSidecarIstioProxyImageRegexp) + +// isInjectedWithIstioSidecarProxy returns whether the given pod has been +// injected by Istio with a sidecar proxy that is compatible with Cilium. +func isInjectedWithIstioSidecarProxy(scopedLog *logrus.Entry, pod *slim_corev1.Pod) bool { + istioStatusString, ok := pod.Annotations[AnnotationIstioSidecarStatus] + if !ok { + // Istio's injection annotation was not found. + scopedLog.Debugf("No %s annotation", AnnotationIstioSidecarStatus) + return false + } + + scopedLog.Debugf("Found %s annotation with value: %s", + AnnotationIstioSidecarStatus, istioStatusString) + + // Check that there's an "istio-proxy" container that uses an image + // compatible with Cilium. + for _, container := range pod.Spec.Containers { + if container.Name != "istio-proxy" { + continue + } + scopedLog.Debug("Found istio-proxy container in pod") + + if !SidecarIstioProxyImageRegexp.MatchString(container.Image) { + continue + } + scopedLog.Debugf("istio-proxy container runs Cilium-compatible image: %s", container.Image) + + for _, mountPath := range container.VolumeMounts { + if mountPath.MountPath != "/var/run/cilium" { + continue + } + scopedLog.Debug("istio-proxy container has volume mounted into /var/run/cilium") + + return true + } + } + + scopedLog.Debug("No Cilium-compatible istio-proxy container found") + return false +} + +// GetPodMetadata returns the labels and annotations of the pod with the given +// namespace / name. +func GetPodMetadata(k8sNs *slim_corev1.Namespace, pod *slim_corev1.Pod) (containerPorts []slim_corev1.ContainerPort, lbls map[string]string, retAnno map[string]string, retErr error) { + namespace := pod.Namespace + scopedLog := log.WithFields(logrus.Fields{ + logfields.K8sNamespace: namespace, + logfields.K8sPodName: pod.Name, + }) + scopedLog.Debug("Connecting to k8s local stores to retrieve labels for pod") + + objMetaCpy := pod.ObjectMeta.DeepCopy() + annotations := objMetaCpy.Annotations + k8sLabels := filterPodLabels(objMetaCpy.Labels) + + // If the pod has been injected with an Istio sidecar proxy compatible with + // Cilium, add a label to notify that. + // If the pod already contains that label to explicitly enable or disable + // the sidecar proxy mode, keep it as is. + if val, ok := objMetaCpy.Labels[k8sConst.PolicyLabelIstioSidecarProxy]; ok { + k8sLabels[k8sConst.PolicyLabelIstioSidecarProxy] = val + } else if isInjectedWithIstioSidecarProxy(scopedLog, pod) { + k8sLabels[k8sConst.PolicyLabelIstioSidecarProxy] = "true" + } + + for _, containers := range pod.Spec.Containers { + containerPorts = append(containerPorts, containers.Ports...) + } + + labels := k8sUtils.SanitizePodLabels(k8sLabels, k8sNs, pod.Spec.ServiceAccountName, option.Config.ClusterName) + + return containerPorts, labels, annotations, nil +} + +// filterPodLabels returns a copy of the given labels map, without the labels owned by Cilium. 
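+//
+// Sketch (hypothetical labels): given
+//
+//	{"app": "web", "io.cilium.k8s.policy.cluster": "default"}
+//
+// only {"app": "web"} is returned, assuming k8sConst.LabelPrefix covers the
+// "io.cilium.k8s" key space.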
+func filterPodLabels(labels map[string]string) map[string]string { + res := map[string]string{} + for k, v := range labels { + if strings.HasPrefix(k, k8sConst.LabelPrefix) { + continue + } + res[k] = v + } + return res +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/logfields.go b/vendor/github.com/cilium/cilium/pkg/k8s/logfields.go new file mode 100644 index 0000000000..bf6b46bfa9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/logfields.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package k8s + +import ( + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" +) + +// logging field definitions +const ( + // subsysK8s is the value for logfields.LogSubsys + subsysK8s = "k8s" +) + +var ( + // log is the k8s package logger object. + log = logging.DefaultLogger.WithField(logfields.LogSubsys, subsysK8s) +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/metrics/metrics.go b/vendor/github.com/cilium/cilium/pkg/k8s/metrics/metrics.go new file mode 100644 index 0000000000..f153855994 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/metrics/metrics.go @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package metrics + +import ( + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/time" +) + +var ( + // LastInteraction is the time at which the last apiserver interaction + // occurred + LastInteraction eventTimestamper + // LastSuccessInteraction is the time at which we have received a successful + // k8s apiserver reply (i.e. a response code 2xx or 4xx). + LastSuccessInteraction eventTimestamper +) + +type eventTimestamper struct { + timestamp time.Time + lock lock.RWMutex +} + +// Reset sets the timestamp to the current time +func (e *eventTimestamper) Reset() { + e.lock.Lock() + e.timestamp = time.Now() + e.lock.Unlock() +} + +// Time returns the timestamp as set per Reset() +func (e *eventTimestamper) Time() time.Time { + e.lock.RLock() + t := e.timestamp + e.lock.RUnlock() + return t +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/network_policy.go b/vendor/github.com/cilium/cilium/pkg/k8s/network_policy.go new file mode 100644 index 0000000000..be320c5a9d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/network_policy.go @@ -0,0 +1,334 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package k8s + +import ( + "fmt" + + "github.com/cilium/cilium/pkg/annotation" + k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io" + k8sCiliumUtils "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils" + slim_networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1" + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + k8sUtils "github.com/cilium/cilium/pkg/k8s/utils" + "github.com/cilium/cilium/pkg/labels" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/policy" + "github.com/cilium/cilium/pkg/policy/api" +) + +const ( + resourceTypeNetworkPolicy = "NetworkPolicy" +) + +var ( + allowAllNamespacesRequirement = slim_metav1.LabelSelectorRequirement{ + Key: k8sConst.PodNamespaceLabel, + Operator: slim_metav1.LabelSelectorOpExists, + } +) + +// GetPolicyLabelsv1 extracts the name of np. 
It uses the name from the Cilium
+// annotation if present. If the policy's annotations do not contain
+// the Cilium annotation, the policy's name field is used instead.
+func GetPolicyLabelsv1(np *slim_networkingv1.NetworkPolicy) labels.LabelArray {
+	if np == nil {
+		log.Warningf("unable to extract policy labels because provided NetworkPolicy is nil")
+		return nil
+	}
+
+	policyName, _ := annotation.Get(np, annotation.PolicyName, annotation.PolicyNameAlias)
+	policyUID := np.UID
+
+	if policyName == "" {
+		policyName = np.Name
+	}
+
+	// Here we are using ExtractNamespaceOrDefault instead of ExtractNamespace because we know
+	// for sure that the Object is namespace scoped, so if no namespace is provided instead
+	// of assuming that the Object is cluster scoped we return the default namespace.
+	ns := k8sUtils.ExtractNamespaceOrDefault(&np.ObjectMeta)
+
+	return k8sCiliumUtils.GetPolicyLabels(ns, policyName, policyUID, resourceTypeNetworkPolicy)
+}
+
+func parseNetworkPolicyPeer(namespace string, peer *slim_networkingv1.NetworkPolicyPeer) *api.EndpointSelector {
+	if peer == nil {
+		return nil
+	}
+
+	var retSel *api.EndpointSelector
+
+	if peer.NamespaceSelector != nil {
+		namespaceSelector := &slim_metav1.LabelSelector{
+			MatchLabels: make(map[string]string, len(peer.NamespaceSelector.MatchLabels)),
+		}
+		// We use our own special label prefix for namespace metadata,
+		// thus we need to prepend that prefix to all NamespaceSelector.MatchLabels keys.
+		for k, v := range peer.NamespaceSelector.MatchLabels {
+			namespaceSelector.MatchLabels[policy.JoinPath(k8sConst.PodNamespaceMetaLabels, k)] = v
+		}
+
+		// The same namespace metadata prefix must be prepended to all
+		// NamespaceSelector.MatchExpressions keys as well.
+		for _, matchExp := range peer.NamespaceSelector.MatchExpressions {
+			lsr := slim_metav1.LabelSelectorRequirement{
+				Key:      policy.JoinPath(k8sConst.PodNamespaceMetaLabels, matchExp.Key),
+				Operator: matchExp.Operator,
+			}
+			if matchExp.Values != nil {
+				lsr.Values = make([]string, len(matchExp.Values))
+				copy(lsr.Values, matchExp.Values)
+			}
+			namespaceSelector.MatchExpressions =
+				append(namespaceSelector.MatchExpressions, lsr)
+		}
+
+		// Empty namespace selector selects all namespaces (i.e., a namespace
+		// label exists).
+		if len(namespaceSelector.MatchLabels) == 0 && len(namespaceSelector.MatchExpressions) == 0 {
+			namespaceSelector.MatchExpressions = []slim_metav1.LabelSelectorRequirement{allowAllNamespacesRequirement}
+		}
+
+		selector := api.NewESFromK8sLabelSelector(labels.LabelSourceK8sKeyPrefix, namespaceSelector, peer.PodSelector)
+		retSel = &selector
+	} else if peer.PodSelector != nil {
+		podSelector := parsePodSelector(peer.PodSelector, namespace)
+		selector := api.NewESFromK8sLabelSelector(labels.LabelSourceK8sKeyPrefix, podSelector)
+		retSel = &selector
+	}
+
+	return retSel
+}
+
+func hasV1PolicyType(pTypes []slim_networkingv1.PolicyType, typ slim_networkingv1.PolicyType) bool {
+	for _, pType := range pTypes {
+		if pType == typ {
+			return true
+		}
+	}
+	return false
+}
+
+// ParseNetworkPolicy parses a k8s NetworkPolicy. Returns a list of
+// Cilium policy rules that can be added, along with an error if there was an
+// error sanitizing the rules.
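+//
+// A minimal usage sketch (assuming np was obtained from an informer event):
+//
+//	rules, err := ParseNetworkPolicy(np)
+//	if err != nil {
+//		return fmt.Errorf("parsing NetworkPolicy %s/%s: %w", np.Namespace, np.Name, err)
+//	}
+//	// rules can now be imported into the policy repository.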
+func ParseNetworkPolicy(np *slim_networkingv1.NetworkPolicy) (api.Rules, error) { + + if np == nil { + return nil, fmt.Errorf("cannot parse NetworkPolicy because it is nil") + } + + ingresses := []api.IngressRule{} + egresses := []api.EgressRule{} + + // Since we know that the object NetworkPolicy is namespace scoped we assign + // namespace to default namespace if the field is empty in the object. + namespace := k8sUtils.ExtractNamespaceOrDefault(&np.ObjectMeta) + + for _, iRule := range np.Spec.Ingress { + fromRules := []api.IngressRule{} + if iRule.From != nil && len(iRule.From) > 0 { + for _, rule := range iRule.From { + ingress := api.IngressRule{} + endpointSelector := parseNetworkPolicyPeer(namespace, &rule) + + if endpointSelector != nil { + ingress.FromEndpoints = append(ingress.FromEndpoints, *endpointSelector) + } else { + // No label-based selectors were in NetworkPolicyPeer. + log.WithField(logfields.K8sNetworkPolicyName, np.Name).Debug("NetworkPolicyPeer does not have PodSelector or NamespaceSelector") + } + + // Parse CIDR-based parts of rule. + if rule.IPBlock != nil { + ingress.FromCIDRSet = append(ingress.FromCIDRSet, ipBlockToCIDRRule(rule.IPBlock)) + } + + fromRules = append(fromRules, ingress) + } + } else { + // Based on NetworkPolicyIngressRule docs: + // From []NetworkPolicyPeer + // If this field is empty or missing, this rule matches all + // sources (traffic not restricted by source). + ingress := api.IngressRule{} + ingress.FromEndpoints = append(ingress.FromEndpoints, api.WildcardEndpointSelector) + + fromRules = append(fromRules, ingress) + } + + // We apply the ports to all rules generated from the From section + if iRule.Ports != nil && len(iRule.Ports) > 0 { + toPorts := parsePorts(iRule.Ports) + for i := range fromRules { + fromRules[i].ToPorts = toPorts + } + } + + ingresses = append(ingresses, fromRules...) + } + + for _, eRule := range np.Spec.Egress { + toRules := []api.EgressRule{} + + if eRule.To != nil && len(eRule.To) > 0 { + for _, rule := range eRule.To { + egress := api.EgressRule{} + if rule.NamespaceSelector != nil || rule.PodSelector != nil { + endpointSelector := parseNetworkPolicyPeer(namespace, &rule) + + if endpointSelector != nil { + egress.ToEndpoints = append(egress.ToEndpoints, *endpointSelector) + } else { + log.WithField(logfields.K8sNetworkPolicyName, np.Name).Debug("NetworkPolicyPeer does not have PodSelector or NamespaceSelector") + } + } + if rule.IPBlock != nil { + egress.ToCIDRSet = append(egress.ToCIDRSet, ipBlockToCIDRRule(rule.IPBlock)) + } + + toRules = append(toRules, egress) + } + } else { + // Based on NetworkPolicyEgressRule docs: + // To []NetworkPolicyPeer + // If this field is empty or missing, this rule matches all + // destinations (traffic not restricted by destination) + egress := api.EgressRule{} + egress.ToEndpoints = append(egress.ToEndpoints, api.WildcardEndpointSelector) + + toRules = append(toRules, egress) + } + + // We apply the ports to all rules generated from the To section + if eRule.Ports != nil && len(eRule.Ports) > 0 { + toPorts := parsePorts(eRule.Ports) + for i := range toRules { + toRules[i].ToPorts = toPorts + } + } + + egresses = append(egresses, toRules...) 
+	}
+
+	// Convert the k8s default-deny model to the Cilium default-deny model
+	// spec:
+	//  podSelector: {}
+	//  policyTypes:
+	//  - Ingress
+	// NetworkPolicies from k8s < 1.7 do not contain any PolicyTypes, so we
+	// also default deny ingress when PolicyTypes does not contain Egress
+	if len(ingresses) == 0 &&
+		(hasV1PolicyType(np.Spec.PolicyTypes, slim_networkingv1.PolicyTypeIngress) ||
+			!hasV1PolicyType(np.Spec.PolicyTypes, slim_networkingv1.PolicyTypeEgress)) {
+		ingresses = []api.IngressRule{{}}
+	}
+
+	// Convert the k8s default-deny model to the Cilium default-deny model
+	// spec:
+	//  podSelector: {}
+	//  policyTypes:
+	//  - Egress
+	if len(egresses) == 0 && hasV1PolicyType(np.Spec.PolicyTypes, slim_networkingv1.PolicyTypeEgress) {
+		egresses = []api.EgressRule{{}}
+	}
+
+	podSelector := parsePodSelector(&np.Spec.PodSelector, namespace)
+
+	// The next patch will pass the UID.
+	rule := api.NewRule().
+		WithEndpointSelector(api.NewESFromK8sLabelSelector(labels.LabelSourceK8sKeyPrefix, podSelector)).
+		WithLabels(GetPolicyLabelsv1(np)).
+		WithIngressRules(ingresses).
+		WithEgressRules(egresses)
+
+	if err := rule.Sanitize(); err != nil {
+		return nil, err
+	}
+
+	return api.Rules{rule}, nil
+}
+
+// NetworkPolicyHasEndPort returns true if the network policy has an
+// EndPort.
+func NetworkPolicyHasEndPort(np *slim_networkingv1.NetworkPolicy) bool {
+	for _, iRule := range np.Spec.Ingress {
+		for _, port := range iRule.Ports {
+			if port.EndPort != nil && *port.EndPort > 0 {
+				return true
+			}
+		}
+	}
+	for _, eRule := range np.Spec.Egress {
+		for _, port := range eRule.Ports {
+			if port.EndPort != nil && *port.EndPort > 0 {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func parsePodSelector(podSelectorIn *slim_metav1.LabelSelector, namespace string) *slim_metav1.LabelSelector {
+	podSelector := &slim_metav1.LabelSelector{
+		MatchLabels: make(map[string]slim_metav1.MatchLabelsValue, len(podSelectorIn.MatchLabels)),
+	}
+	for k, v := range podSelectorIn.MatchLabels {
+		podSelector.MatchLabels[k] = v
+	}
+	// The PodSelector should only select pods in the same namespace the
+	// policy is stored in, thus we add the namespace to the MatchLabels map.
+	podSelector.MatchLabels[k8sConst.PodNamespaceLabel] = namespace
+
+	for _, matchExp := range podSelectorIn.MatchExpressions {
+		lsr := slim_metav1.LabelSelectorRequirement{
+			Key:      matchExp.Key,
+			Operator: matchExp.Operator,
+		}
+		if matchExp.Values != nil {
+			lsr.Values = make([]string, len(matchExp.Values))
+			copy(lsr.Values, matchExp.Values)
+		}
+		podSelector.MatchExpressions =
+			append(podSelector.MatchExpressions, lsr)
+	}
+	return podSelector
+}
+
+func ipBlockToCIDRRule(block *slim_networkingv1.IPBlock) api.CIDRRule {
+	cidrRule := api.CIDRRule{}
+	cidrRule.Cidr = api.CIDR(block.CIDR)
+	for _, v := range block.Except {
+		cidrRule.ExceptCIDRs = append(cidrRule.ExceptCIDRs, api.CIDR(v))
+	}
+	return cidrRule
+}
+
+// parsePorts converts a list of K8s NetworkPolicyPorts to Cilium PortRules.
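+//
+// For example, a NetworkPolicyPort naming port 8080 with no protocol becomes
+// (sketch, following the TCP default below):
+//
+//	api.PortRule{Ports: []api.PortProtocol{{Port: "8080", Protocol: api.ProtoTCP}}}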
+func parsePorts(ports []slim_networkingv1.NetworkPolicyPort) []api.PortRule {
+	portRules := []api.PortRule{}
+	for _, port := range ports {
+		protocol := api.ProtoTCP
+		if port.Protocol != nil {
+			protocol, _ = api.ParseL4Proto(string(*port.Protocol))
+		}
+
+		portStr := "0"
+		if port.Port != nil {
+			portStr = port.Port.String()
+		}
+
+		portRule := api.PortRule{
+			Ports: []api.PortProtocol{
+				{Port: portStr, Protocol: protocol},
+			},
+		}
+
+		portRules = append(portRules, portRule)
+	}
+
+	return portRules
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/node.go b/vendor/github.com/cilium/cilium/pkg/k8s/node.go
new file mode 100644
index 0000000000..50d8037702
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/node.go
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package k8s
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+
+	"github.com/cilium/cilium/pkg/annotation"
+	"github.com/cilium/cilium/pkg/cidr"
+	slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	"github.com/cilium/cilium/pkg/node/addressing"
+	nodeTypes "github.com/cilium/cilium/pkg/node/types"
+	"github.com/cilium/cilium/pkg/option"
+	"github.com/cilium/cilium/pkg/source"
+
+	"github.com/sirupsen/logrus"
+)
+
+// ParseNodeAddressType converts a Kubernetes NodeAddressType to a Cilium
+// NodeAddressType. If the Kubernetes NodeAddressType does not have a
+// corresponding Cilium AddressType, an error is returned.
+func ParseNodeAddressType(k8sAddress slim_corev1.NodeAddressType) (addressing.AddressType, error) {
+
+	var err error
+	convertedAddr := addressing.AddressType(k8sAddress)
+
+	switch convertedAddr {
+	case addressing.NodeExternalDNS, addressing.NodeExternalIP, addressing.NodeHostName, addressing.NodeInternalIP, addressing.NodeInternalDNS:
+	default:
+		err = fmt.Errorf("invalid Kubernetes NodeAddressType %s", convertedAddr)
+	}
+	return convertedAddr, err
+}
+
+type nodeAddressGroup struct {
+	typ    slim_corev1.NodeAddressType
+	family slim_corev1.IPFamily
+}
+
+// ParseNode parses a Kubernetes node into a Cilium node
+func ParseNode(k8sNode *slim_corev1.Node, source source.Source) *nodeTypes.Node {
+	addrGroups := make(map[nodeAddressGroup]struct{})
+	scopedLog := log.WithFields(logrus.Fields{
+		logfields.NodeName:  k8sNode.Name,
+		logfields.K8sNodeID: k8sNode.UID,
+	})
+	addrs := []nodeTypes.Address{}
+	for _, addr := range k8sNode.Status.Addresses {
+		// We only care about these address types;
+		// all other types are ignored.
+		switch addr.Type {
+		case slim_corev1.NodeInternalIP, slim_corev1.NodeExternalIP:
+		default:
+			continue
+		}
+		// If the address is not set, let's not parse it at all.
+		// This can be the case for corev1.NodeExternalIPs.
+		if addr.Address == "" {
+			continue
+		}
+		addrGroup := nodeAddressGroup{
+			typ: addr.Type,
+		}
+		ip := net.ParseIP(addr.Address)
+		switch {
+		case ip != nil && ip.To4() != nil:
+			addrGroup.family = slim_corev1.IPv4Protocol
+		case ip != nil && ip.To16() != nil:
+			addrGroup.family = slim_corev1.IPv6Protocol
+		default:
+			scopedLog.WithFields(logrus.Fields{
+				logfields.IPAddr: addr.Address,
+				logfields.Type:   addr.Type,
+			}).Warn("Ignoring invalid node IP")
+			continue
+		}
+		_, groupFound := addrGroups[addrGroup]
+		if groupFound {
+			scopedLog.WithFields(logrus.Fields{
+				logfields.Node: k8sNode.Name,
+				logfields.Type: addr.Type,
+			}).Warn("Detected multiple IPs of the same address type and family; Cilium will only consider the first IP in the Node resource")
+			continue
+		}
+		addrGroups[addrGroup] = struct{}{}
+
+		addressType, err := ParseNodeAddressType(addr.Type)
+		if err != nil {
+			scopedLog.WithError(err).Warn("invalid address type for node")
+		}
+
+		na := nodeTypes.Address{
+			Type: addressType,
+			IP:   ip,
+		}
+		addrs = append(addrs, na)
+	}
+	newNode := &nodeTypes.Node{
+		Name:        k8sNode.Name,
+		Cluster:     option.Config.ClusterName,
+		IPAddresses: addrs,
+		Source:      source,
+	}
+
+	if len(k8sNode.Spec.PodCIDRs) != 0 {
+		if len(k8sNode.Spec.PodCIDRs) > 2 {
+			scopedLog.WithField("podCIDR", k8sNode.Spec.PodCIDRs).Errorf("Invalid PodCIDRs: expected 1 or 2 PodCIDRs, received %d", len(k8sNode.Spec.PodCIDRs))
+		} else {
+			for _, podCIDR := range k8sNode.Spec.PodCIDRs {
+				if allocCIDR, err := cidr.ParseCIDR(podCIDR); err != nil {
+					scopedLog.WithError(err).WithField("podCIDR", podCIDR).Warn("Invalid PodCIDR value for node")
+				} else {
+					if allocCIDR.IP.To4() != nil {
+						newNode.IPv4AllocCIDR = allocCIDR
+					} else {
+						newNode.IPv6AllocCIDR = allocCIDR
+					}
+				}
+			}
+		}
+	} else if len(k8sNode.Spec.PodCIDR) != 0 {
+		if allocCIDR, err := cidr.ParseCIDR(k8sNode.Spec.PodCIDR); err != nil {
+			scopedLog.WithError(err).WithField(logfields.V4Prefix, k8sNode.Spec.PodCIDR).Warn("Invalid PodCIDR value for node")
+		} else {
+			if allocCIDR.IP.To4() != nil {
+				newNode.IPv4AllocCIDR = allocCIDR
+			} else {
+				newNode.IPv6AllocCIDR = allocCIDR
+			}
+		}
+	}
+
+	newNode.Labels = k8sNode.GetLabels()
+	newNode.Annotations = make(map[string]string)
+	// Propagate only Cilium-specific annotations.
+	for key, value := range k8sNode.GetAnnotations() {
+		if annotation.CiliumPrefixRegex.MatchString(key) {
+			newNode.Annotations[key] = value
+		}
+	}
+
+	if !option.Config.AnnotateK8sNode {
+		return newNode
+	}
+
+	// Any code below this line depends on k8s node annotations. If we are
+	// not annotating the node then we should not use any annotations.
+
+	k8sNodeAddHostIP := func(key string, alias string) {
+		if ciliumInternalIP, ok := annotation.Get(k8sNode, key, alias); !ok || ciliumInternalIP == "" {
+			scopedLog.Debugf("Missing %s (or %s). Annotation required when IPsec is enabled", key, alias)
+		} else if ip := net.ParseIP(ciliumInternalIP); ip == nil {
+			scopedLog.Debugf("ParseIP %s error", ciliumInternalIP)
+		} else {
+			na := nodeTypes.Address{
+				Type: addressing.NodeCiliumInternalIP,
+				IP:   ip,
+			}
+			addrs = append(addrs, na)
+			scopedLog.Debugf("Add NodeCiliumInternalIP: %s", ip)
+		}
+	}
+
+	k8sNodeAddHostIP(annotation.CiliumHostIP, annotation.CiliumHostIPAlias)
+	k8sNodeAddHostIP(annotation.CiliumHostIPv6, annotation.CiliumHostIPv6Alias)
+	newNode.IPAddresses = addrs
+
+	if key, ok := annotation.Get(k8sNode, annotation.CiliumEncryptionKey, annotation.CiliumEncryptionKeyAlias); ok {
+		if u, err := strconv.ParseUint(key, 10, 8); err == nil {
+			newNode.EncryptionKey = uint8(u)
+		}
+	}
+
+	// Spec.PodCIDR takes precedence since it's
+	// the CIDR assigned by the k8s controller manager.
+	// In case it's invalid or empty we fall back to our annotations.
+	if newNode.IPv4AllocCIDR == nil {
+		if ipv4CIDR, ok := annotation.Get(k8sNode, annotation.V4CIDRName, annotation.V4CIDRNameAlias); !ok || ipv4CIDR == "" {
+			scopedLog.Debug("Empty IPv4 CIDR annotation in node")
+		} else {
+			allocCIDR, err := cidr.ParseCIDR(ipv4CIDR)
+			if err != nil {
+				scopedLog.WithError(err).WithField(logfields.V4Prefix, ipv4CIDR).Error("BUG, invalid IPv4 annotation CIDR in node")
+			} else {
+				newNode.IPv4AllocCIDR = allocCIDR
+			}
+		}
+	}
+
+	if newNode.IPv6AllocCIDR == nil {
+		if ipv6CIDR, ok := annotation.Get(k8sNode, annotation.V6CIDRName, annotation.V6CIDRNameAlias); !ok || ipv6CIDR == "" {
+			scopedLog.Debug("Empty IPv6 CIDR annotation in node")
+		} else {
+			allocCIDR, err := cidr.ParseCIDR(ipv6CIDR)
+			if err != nil {
+				scopedLog.WithError(err).WithField(logfields.V6Prefix, ipv6CIDR).Error("BUG, invalid IPv6 annotation CIDR in node")
+			} else {
+				newNode.IPv6AllocCIDR = allocCIDR
+			}
+		}
+	}
+
+	if newNode.IPv4HealthIP == nil {
+		if healthIP, ok := annotation.Get(k8sNode, annotation.V4HealthName, annotation.V4HealthNameAlias); !ok || healthIP == "" {
+			scopedLog.Debug("Empty IPv4 health endpoint annotation in node")
+		} else if ip := net.ParseIP(healthIP); ip == nil {
+			scopedLog.WithField(logfields.V4HealthIP, healthIP).Error("BUG, invalid IPv4 health endpoint annotation in node")
+		} else {
+			newNode.IPv4HealthIP = ip
+		}
+	}
+
+	if newNode.IPv6HealthIP == nil {
+		if healthIP, ok := annotation.Get(k8sNode, annotation.V6HealthName, annotation.V6HealthNameAlias); !ok || healthIP == "" {
+			scopedLog.Debug("Empty IPv6 health endpoint annotation in node")
+		} else if ip := net.ParseIP(healthIP); ip == nil {
+			scopedLog.WithField(logfields.V6HealthIP, healthIP).Error("BUG, invalid IPv6 health endpoint annotation in node")
+		} else {
+			newNode.IPv6HealthIP = ip
+		}
+	}
+
+	if newNode.IPv4IngressIP == nil {
+		if ingressIP, ok := annotation.Get(k8sNode, annotation.V4IngressName, annotation.V4IngressNameAlias); !ok || ingressIP == "" {
+			scopedLog.Debug("Empty IPv4 Ingress annotation in node")
+		} else if ip := net.ParseIP(ingressIP); ip == nil {
+			scopedLog.WithField(logfields.V4IngressIP, ingressIP).Error("BUG, invalid IPv4 Ingress annotation in node")
+		} else {
+			newNode.IPv4IngressIP = ip
+		}
+	}
+
+	if newNode.IPv6IngressIP == nil {
+		if ingressIP, ok := annotation.Get(k8sNode, annotation.V6IngressName, annotation.V6IngressNameAlias); !ok || ingressIP == "" {
+			scopedLog.Debug("Empty IPv6 Ingress annotation in node")
+		} else if ip := net.ParseIP(ingressIP); ip == nil {
+			scopedLog.WithField(logfields.V6IngressIP, ingressIP).Error("BUG, invalid IPv6 Ingress annotation in node")
+		} else {
+			newNode.IPv6IngressIP = ip
+		}
+	}
+
+	return newNode
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/resource/error.go b/vendor/github.com/cilium/cilium/pkg/k8s/resource/error.go
new file mode 100644
index 0000000000..fdb64a3014
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/resource/error.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package resource
+
+type ErrorAction string
+
+var (
+	// ErrorActionRetry instructs to retry the processing. The key is requeued after
+	// rate limiting.
+	ErrorActionRetry ErrorAction = "retry"
+
+	// ErrorActionIgnore instructs to ignore the error.
+	ErrorActionIgnore ErrorAction = "ignore"
+
+	// ErrorActionStop instructs to stop the processing for this subscriber.
+	ErrorActionStop ErrorAction = "stop"
+)
+
+// ErrorHandler is a function that takes the key of the failing object (the zero
+// key if the event was a sync), the number of times the key has been retried,
+// and the error that occurred. The function returns the action that should be taken.
+type ErrorHandler func(key Key, numRetries int, err error) ErrorAction
+
+// AlwaysRetry is an error handler that always retries the error.
+func AlwaysRetry(Key, int, error) ErrorAction {
+	return ErrorActionRetry
+}
+
+// RetryUpTo is an error handler that retries a key up to the specified number of
+// times before stopping.
+func RetryUpTo(n int) ErrorHandler {
+	return func(key Key, numRetries int, err error) ErrorAction {
+		if numRetries >= n {
+			return ErrorActionStop
+		}
+		return ErrorActionRetry
+	}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/resource/event.go b/vendor/github.com/cilium/cilium/pkg/k8s/resource/event.go
new file mode 100644
index 0000000000..8c97e21ee1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/resource/event.go
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package resource
+
+import (
+	k8sRuntime "k8s.io/apimachinery/pkg/runtime"
+)
+
+type EventKind string
+
+const (
+	Sync   EventKind = "sync"
+	Upsert EventKind = "upsert"
+	Delete EventKind = "delete"
+)
+
+// Event emitted from a resource.
+type Event[T k8sRuntime.Object] struct {
+	Kind   EventKind
+	Key    Key
+	Object T
+
+	// Done marks the event as processed. If err is non-nil, the
+	// key of the object is requeued and the processing retried at
+	// a later time with a potentially new version of the object.
+	//
+	// If this method is not called after the references to the event
+	// are gone, the finalizer will panic.
+	Done func(err error)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/resource/key.go b/vendor/github.com/cilium/cilium/pkg/k8s/resource/key.go
new file mode 100644
index 0000000000..99fa94b3f5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/resource/key.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package resource
+
+import (
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/client-go/tools/cache"
+)
+
+// Key of a K8s object, e.g. name and optional namespace.
+type Key struct {
+	// Name is the name of the object.
+	Name string
+
+	// Namespace is the namespace, or empty if the object is not namespaced.
+	Namespace string
+}
+
+func (k Key) String() string {
+	if len(k.Namespace) > 0 {
+		return k.Namespace + "/" + k.Name
+	}
+	return k.Name
+}
+
+func NewKey(obj any) Key {
+	if d, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+		namespace, name, _ := cache.SplitMetaNamespaceKey(d.Key)
+		return Key{name, namespace}
+	}
+
+	meta, err := meta.Accessor(obj)
+	if err != nil {
+		return Key{}
+	}
+	if len(meta.GetNamespace()) > 0 {
+		return Key{meta.GetName(), meta.GetNamespace()}
+	}
+	return Key{meta.GetName(), ""}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/resource/resource.go b/vendor/github.com/cilium/cilium/pkg/k8s/resource/resource.go
new file mode 100644
index 0000000000..d11ab5a776
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/resource/resource.go
@@ -0,0 +1,912 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package resource
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	k8sRuntime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+
+	"github.com/cilium/cilium/pkg/hive/cell"
+	k8smetrics "github.com/cilium/cilium/pkg/k8s/metrics"
+	"github.com/cilium/cilium/pkg/k8s/watchers/resources"
+	"github.com/cilium/cilium/pkg/lock"
+	"github.com/cilium/cilium/pkg/metrics"
+	"github.com/cilium/cilium/pkg/promise"
+	"github.com/cilium/cilium/pkg/stream"
+)
+
+// Resource provides access to a Kubernetes resource through either
+// a stream of events or a read-only store.
+//
+// Events can be observed from a constructor, as subscriber registration
+// is non-blocking.
+//
+// Store(), however, should only be called from a start hook, or from a
+// goroutine forked from the start hook, as it blocks until the store
+// has been synchronized.
+//
+// The subscriber can process the events from Events() asynchronously and in
+// parallel, but for each event the Done() function must be called to mark
+// the event as handled. If Done() is not called, no new events will be emitted
+// for this key. If event handling is marked as failed, the configured error
+// handler (WithErrorHandler) is called. The default error handler will requeue
+// the event (by its key) for a later retry. The requeueing is rate limited and
+// can be configured with the WithRateLimiter option to Events().
+//
+// The resource is lazy, i.e. it will not start the informer until a call
+// has been made to Events() or Store().
+type Resource[T k8sRuntime.Object] interface {
+	// Resource can be observed either via Observe() or via Events(). The observable
+	// is implemented in terms of Events() and the same semantics apply.
+	stream.Observable[Event[T]]
+
+	// Events returns a channel of events. Each event must be marked as handled
+	// with a call to Done(), which marks the key as processed. No new events for
+	// this key will be emitted before Done() is called.
+	//
+	// A missing Done() will lead to an eventual panic (via a finalizer on Event[T]).
+	// Panicking in this situation is necessary, as otherwise no new events would
+	// be emitted for the key, so this must be enforced.
+	//
+	// A stream of Upsert events is emitted first to replay the current state of the
+	// store, after which incremental upserts and deletes follow until the underlying
+	// store is synchronized, at which point a Sync event is emitted, followed by
+	// further incremental updates:
+	//
+	//	(start observing), Upsert, Upsert, Upsert, (done replaying store contents), Upsert, Upsert,
+	//	(store synchronized with API server), Sync, Upsert, Delete, Upsert, ...
+	//
+	// The emitting of the Sync event does not depend on whether or not Upsert events have
+	// all been marked Done() without an error. The sync event solely signals that the underlying
+	// store has synchronized and that Upsert events for objects in a synchronized store have been
+	// sent to the observer.
+	//
+	// When Done() is called with a non-nil error the error handler is invoked, which
+	// can ignore, requeue the event (by key) or close the channel. The default error handler
+	// will requeue.
+	//
+	// If an Upsert is retried and the object has been deleted, a Delete event will be emitted instead.
+	// Conversely, if a Delete event is retried and the object has been recreated with the same key,
+	// an Upsert will be emitted instead.
+	//
+	// If an object is created and immediately deleted, a slow observer may not observe this at
+	// all. In all cases a Delete event is only emitted if the observer has seen an Upsert. Whether or
+	// not it had been successfully handled (via Done(nil)) does not affect this property.
+	Events(ctx context.Context, opts ...EventsOpt) <-chan Event[T]
+
+	// Store retrieves the read-only store for the resource. Blocks until
+	// the store has been synchronized or the context is cancelled.
+	// Returns a non-nil error if the context is cancelled or the resource
+	// has been stopped before the store has synchronized.
+	Store(context.Context) (Store[T], error)
+}
+
+// New creates a new Resource[T]. Use with hive.Provide:
+//
+//	var exampleCell = hive.Module(
+//		"example",
+//		cell.Provide(
+//			// Provide `Resource[*slim_corev1.Pod]` to the hive:
+//			func(lc cell.Lifecycle, c k8sClient.Clientset) resource.Resource[*slim_corev1.Pod] {
+//				lw := utils.ListerWatcherFromTyped[*slim_corev1.PodList](
+//					c.Slim().CoreV1().Pods(""),
+//				)
+//				return resource.New(lc, lw)
+//			},
+//		),
+//		...
+//	)
+//
+//	func usePods(ctx context.Context, pods resource.Resource[*slim_corev1.Pod]) {
+//		go func() {
+//			for ev := range pods.Events(ctx) {
+//				onPodEvent(ev)
+//			}
+//		}()
+//	}
+//
+//	func onPodEvent(event resource.Event[*slim_corev1.Pod]) {
+//		switch event.Kind {
+//		case resource.Sync:
+//			// Pods have now been synced and the set of Upsert events
+//			// received thus far forms a coherent snapshot.
+//
+//			// Must always call event.Done(error) to mark the event as processed.
+//			event.Done(nil)
+//		case resource.Upsert:
+//			event.Done(onPodUpsert(event.Object))
+//		case resource.Delete:
+//			event.Done(onPodDelete(event.Object))
+//		}
+//	}
+//
+// See also pkg/k8s/resource/example/main.go for a runnable example.
+func New[T k8sRuntime.Object](lc cell.Lifecycle, lw cache.ListerWatcher, opts ...ResourceOption) Resource[T] {
+	r := &resource[T]{
+		lw: lw,
+	}
+	r.opts.sourceObj = func() k8sRuntime.Object {
+		var obj T
+		return obj
+	}
+	for _, o := range opts {
+		o(&r.opts)
+	}
+	r.ctx, r.cancel = context.WithCancel(context.Background())
+	r.reset()
+	lc.Append(r)
+	return r
+}
+
+type options struct {
+	transform   cache.TransformFunc      // if non-nil, the object is transformed with this function before storing
+	sourceObj   func() k8sRuntime.Object // prototype for the object before it is transformed
+	indexers    cache.Indexers           // map of the optional custom indexers to be added to the underlying resource informer
+	metricScope string                   // the scope label used when recording metrics for the resource
+	name        string                   // the name label used for the workqueue metrics
+	releasable  bool                     // if true, the underlying informer will be stopped when the last subscriber cancels its subscription
+}
+
+type ResourceOption func(o *options)
+
+// WithTransform sets the function to transform the object before storing it.
+func WithTransform[From, To k8sRuntime.Object](transform func(From) (To, error)) ResourceOption {
+	return WithLazyTransform(
+		func() k8sRuntime.Object {
+			var obj From
+			return obj
+		},
+		func(fromRaw any) (any, error) {
+			if from, ok := fromRaw.(From); ok {
+				to, err := transform(from)
+				return to, err
+			} else {
+				var obj From
+				return nil, fmt.Errorf("resource.WithTransform: expected %T, got %T", obj, fromRaw)
+			}
+		})
+}
+
+// WithLazyTransform sets the function to transform the object before storing it.
+// Unlike "WithTransform", this defers the resolving of the source object type until the resource
+// is needed. Use this in situations where the source object depends on api-server capabilities.
+func WithLazyTransform(sourceObj func() k8sRuntime.Object, transform cache.TransformFunc) ResourceOption {
+	return func(o *options) {
+		o.sourceObj = sourceObj
+		o.transform = transform
+	}
+}
+
+// WithMetric enables metrics collection for the resource using the provided scope.
+func WithMetric(scope string) ResourceOption {
+	return func(o *options) {
+		o.metricScope = scope
+	}
+}
+
+// WithIndexers sets additional custom indexers on the resource store.
+func WithIndexers(indexers cache.Indexers) ResourceOption {
+	return func(o *options) {
+		o.indexers = indexers
+	}
+}
+
+// WithName sets the name of the resource. Used for workqueue metrics.
+func WithName(name string) ResourceOption {
+	return func(o *options) {
+		o.name = name
+	}
+}
+
+// WithStoppableInformer marks the resource as releasable. A releasable resource stops
+// the underlying informer if the last active subscriber cancels its subscription.
+// In this case the resource is stopped and prepared again for a subsequent call to
+// either Events() or Store().
+// A subscriber is a consumer who has taken a reference to the store with Store() or that
+// is listening to the events stream channel with Events().
+// This option is meant to be used for very specific cases of resources with a high rate
+// of updates that can potentially hinder scalability in very large clusters, like
+// CiliumNode and CiliumEndpoint.
+// For these cases, stopping the informer is required when switching to other data sources
+// that scale better.
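+//
+// A minimal usage sketch (lc, lw and ctx are assumed to exist):
+//
+//	nodes := resource.New[*cilium_api_v2.CiliumNode](lc, lw, resource.WithStoppableInformer())
+//	store, err := nodes.Store(ctx) // takes a reference, starting the informer
+//	...
+//	store.Release() // releasing the last reference stops the informer again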
+func WithStoppableInformer() ResourceOption { + return func(o *options) { + o.releasable = true + } +} + +type resource[T k8sRuntime.Object] struct { + mu lock.RWMutex + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + opts options + + needed chan struct{} + + subscribers map[uint64]*subscriber[T] + subId uint64 + + lw cache.ListerWatcher + synchronized bool // flipped to true when informer has synced. + + storePromise promise.Promise[Store[T]] + storeResolver promise.Resolver[Store[T]] + + // meaningful for releasable resources only + refsMu lock.Mutex + refs uint64 + resetCtx context.Context + resetCancel context.CancelFunc +} + +var _ Resource[*corev1.Node] = &resource[*corev1.Node]{} + +func (r *resource[T]) Store(ctx context.Context) (Store[T], error) { + r.markNeeded() + + // Wait until store has synchronized to avoid querying a store + // that has not finished the initial listing. + hasSynced := func() bool { + r.mu.RLock() + defer r.mu.RUnlock() + return r.synchronized + } + cache.WaitForCacheSync(ctx.Done(), hasSynced) + + // use an error handler to release the resource if the store promise + // is rejected or the context is cancelled before the cache has synchronized. + return promise.MapError(r.storePromise, func(err error) error { + r.release() + return err + }).Await(ctx) +} + +func (r *resource[T]) metricEventProcessed(eventKind EventKind, status bool) { + if r.opts.metricScope == "" { + return + } + + result := "success" + if !status { + result = "failed" + } + + var action string + switch eventKind { + case Sync: + return + case Upsert: + action = "update" + case Delete: + action = "delete" + } + + metrics.KubernetesEventProcessed.WithLabelValues(r.opts.metricScope, action, result).Inc() +} + +func (r *resource[T]) metricEventReceived(action string, valid, equal bool) { + if r.opts.metricScope == "" { + return + } + + k8smetrics.LastInteraction.Reset() + + metrics.EventTS.WithLabelValues(metrics.LabelEventSourceK8s, r.opts.metricScope, action).SetToCurrentTime() + validStr := strconv.FormatBool(valid) + equalStr := strconv.FormatBool(equal) + metrics.KubernetesEventReceived.WithLabelValues(r.opts.metricScope, action, validStr, equalStr).Inc() +} + +func (r *resource[T]) Start(cell.HookContext) error { + r.start() + return nil +} + +func (r *resource[T]) start() { + // Don't start the resource if it has been definitely stopped + if r.ctx.Err() != nil { + return + } + r.wg.Add(1) + go r.startWhenNeeded() +} + +func (r *resource[T]) markNeeded() { + if r.opts.releasable { + r.refsMu.Lock() + r.refs++ + r.refsMu.Unlock() + } + + select { + case r.needed <- struct{}{}: + default: + } +} + +func (r *resource[T]) startWhenNeeded() { + defer r.wg.Done() + + // Wait until we're needed before starting the informer. + select { + case <-r.ctx.Done(): + return + case <-r.needed: + } + + // Short-circuit if we're being stopped. + if r.ctx.Err() != nil { + return + } + + store, informer := r.newInformer() + r.storeResolver.Resolve(&typedStore[T]{ + store: store, + release: r.release, + }) + + r.wg.Add(1) + go func() { + defer r.wg.Done() + informer.Run(merge(r.ctx.Done(), r.resetCtx.Done())) + }() + + // Wait for cache to be synced before emitting the sync event. + if cache.WaitForCacheSync(merge(r.ctx.Done(), r.resetCtx.Done()), informer.HasSynced) { + // Emit the sync event for all subscribers. Subscribers + // that subscribe afterwards will emit it by checking + // r.synchronized. 
+		r.mu.Lock()
+		for _, sub := range r.subscribers {
+			sub.enqueueSync()
+		}
+		r.synchronized = true
+		r.mu.Unlock()
+	}
+}
+
+func (r *resource[T]) Stop(stopCtx cell.HookContext) error {
+	if r.opts.releasable {
+		// Grab the refs lock to avoid a concurrent restart for a releasable resource.
+		r.refsMu.Lock()
+		defer r.refsMu.Unlock()
+	}
+
+	r.cancel()
+	r.wg.Wait()
+	return nil
+}
+
+type eventsOpts struct {
+	rateLimiter  workqueue.RateLimiter
+	errorHandler ErrorHandler
+}
+
+type EventsOpt func(*eventsOpts)
+
+// WithRateLimiter sets the rate limiting algorithm to be used when requeueing failed events.
+func WithRateLimiter(r workqueue.RateLimiter) EventsOpt {
+	return func(o *eventsOpts) {
+		o.rateLimiter = r
+	}
+}
+
+// WithErrorHandler specifies the error handling strategy for failed events. By default
+// the strategy is to always requeue the processing of a failed event.
+func WithErrorHandler(h ErrorHandler) EventsOpt {
+	return func(o *eventsOpts) {
+		o.errorHandler = h
+	}
+}
+
+func (r *resource[T]) Observe(ctx context.Context, next func(Event[T]), complete func(error)) {
+	stream.FromChannel(r.Events(ctx)).Observe(ctx, next, complete)
+}
+
+// Events subscribes the caller to resource events.
+//
+// Each subscriber has its own queue and can process events at its own
+// rate. Only object keys are queued, and if an object changes multiple times
+// before the subscriber can handle the event, only the latest state of the
+// object is emitted.
+//
+// The 'ctx' is used to cancel the subscription. The returned channel will be
+// closed when the context is cancelled.
+//
+// Options are supported to configure rate limiting of retries
+// (WithRateLimiter) and the error handling strategy (WithErrorHandler).
+//
+// By default, all errors are retried, the default rate limiter of the
+// workqueue package is used, and the channel is unbuffered.
+func (r *resource[T]) Events(ctx context.Context, opts ...EventsOpt) <-chan Event[T] {
+	_, callerFile, callerLine, _ := runtime.Caller(1)
+	debugInfo := fmt.Sprintf("%T.Events() called from %s:%d", r, callerFile, callerLine)
+
+	options := eventsOpts{
+		errorHandler: AlwaysRetry, // Default error handling is to always retry.
+		rateLimiter:  workqueue.DefaultControllerRateLimiter(),
+	}
+	for _, apply := range opts {
+		apply(&options)
+	}
+
+	// Mark the resource as needed. This will start the informer if it was not already.
+	r.markNeeded()
+
+	out := make(chan Event[T])
+	ctx, subCancel := context.WithCancel(ctx)
+
+	sub := &subscriber[T]{
+		r:         r,
+		options:   options,
+		debugInfo: debugInfo,
+		wq: workqueue.NewRateLimitingQueueWithConfig(options.rateLimiter,
+			workqueue.RateLimitingQueueConfig{Name: r.resourceName()}),
+	}
+
+	// Fork a goroutine to process the queued keys and pass them to the subscriber.
+	r.wg.Add(1)
+	go func() {
+		defer r.release()
+		defer r.wg.Done()
+		defer close(out)
+
+		// Grab a handle to the store. Asynchronous as informer is started in the background.
+		store, err := r.storePromise.Await(ctx)
+		if err != nil {
+			// Subscriber cancelled before the informer started, bail out.
+			return
+		}
+
+		r.mu.Lock()
+		subId := r.subId
+		r.subId++
+		r.subscribers[subId] = sub
+
+		// Populate the queue with the initial set of keys that are already
+		// in the store. Done under the resource lock to synchronize with delta
+		// processing to make sure we don't end up queuing the key as initial key,
+		// processing it and then requeuing it again.
+		initialKeys := store.IterKeys()
+		for initialKeys.Next() {
+			sub.enqueueKey(initialKeys.Key())
+		}
+
+		// If the informer is already synchronized, then the above set of keys is a consistent
+		// snapshot and we can queue the sync entry. If we're not yet synchronized the sync will
+		// be queued from startWhenNeeded() after the informer has synchronized.
+		if r.synchronized {
+			sub.enqueueSync()
+		}
+		r.mu.Unlock()
+
+		sub.processLoop(ctx, out, store)
+
+		r.mu.Lock()
+		delete(r.subscribers, subId)
+		r.mu.Unlock()
+	}()
+
+	// Fork a goroutine to wait for either the subscriber cancelling or the resource
+	// shutting down.
+	r.wg.Add(1)
+	go func() {
+		defer r.wg.Done()
+		select {
+		case <-r.ctx.Done():
+		case <-r.resetCtx.Done():
+		case <-ctx.Done():
+		}
+		subCancel()
+		sub.wq.ShutDownWithDrain()
+	}()
+
+	return out
+}
+
+func (r *resource[T]) release() {
+	if !r.opts.releasable {
+		return
+	}
+
+	// In case of a releasable resource, stop the underlying informer when the last
+	// reference to it is released. The resource is restarted to be
+	// ready again in case of a subsequent call to either Events() or Store().
+
+	r.refsMu.Lock()
+	defer r.refsMu.Unlock()
+
+	r.refs--
+	if r.refs > 0 {
+		return
+	}
+
+	r.resetCancel()
+	r.wg.Wait()
+	close(r.needed)
+
+	r.reset()
+	r.start()
+}
+
+func (r *resource[T]) reset() {
+	r.subscribers = make(map[uint64]*subscriber[T])
+	r.needed = make(chan struct{}, 1)
+	r.synchronized = false
+	r.storeResolver, r.storePromise = promise.New[Store[T]]()
+	r.resetCtx, r.resetCancel = context.WithCancel(context.Background())
+}
+
+func (r *resource[T]) resourceName() string {
+	if r.opts.name != "" {
+		return r.opts.name
+	}
+
+	// We create a new pointer to the reconciled resource type.
+	// For example, with resource[*cilium_api_v2.CiliumNode] new(T) returns **cilium_api_v2.CiliumNode
+	// and *new(T) is nil. So we create a new pointer using reflect.New().
+	o := *new(T)
+	sourceObj := reflect.New(reflect.TypeOf(o).Elem()).Interface().(T)
+
+	gvk, err := apiutil.GVKForObject(sourceObj, scheme)
+	if err != nil {
+		return ""
+	}
+
+	return strings.ToLower(gvk.Kind)
+}
+
+type subscriber[T k8sRuntime.Object] struct {
+	r         *resource[T]
+	debugInfo string
+	wq        workqueue.RateLimitingInterface
+	options   eventsOpts
+}
+
+func (s *subscriber[T]) processLoop(ctx context.Context, out chan Event[T], store Store[T]) {
+	// Make sure to call ShutDown() at the end. Calling ShutDownWithDrain is not
+	// enough as DelayingQueue does not implement it, so without ShutDown() we'd
+	// leak the (*delayingType).waitingLoop.
+	defer s.wq.ShutDown()
+
+	doneFinalizer := func(done *bool) {
+		// If you get here it is because an Event[T] was handed to a subscriber
+		// that forgot to call Event[T].Done().
+		//
+		// Calling Done() is needed to mark the event as handled. This allows
+		// the next event for the same key to be handled and is used to clear
+		// rate limiting and retry counts of prior failures.
+		panic(fmt.Sprintf(
+			"%s has a broken event handler that did not call Done() "+
+				"before event was garbage collected",
+			s.debugInfo))
+	}
+
+	// To synthesize delete events for the subscriber we keep track of the last known
+	// state of the object given to the subscriber. Objects are cleaned from this map
+	// when delete events are successfully processed.
+	var lastKnownObjects lastKnownObjects[T]
+
+loop:
+	for {
+		// Retrieve an item from the subscriber's queue and then fetch the object
+		// from the store.
+		workItem, shutdown := s.getWorkItem()
+		if shutdown {
+			break
+		}
+
+		var event Event[T]
+
+		switch workItem := workItem.(type) {
+		case syncWorkItem:
+			event.Kind = Sync
+		case keyWorkItem:
+			obj, exists, err := store.GetByKey(workItem.key)
+			if !exists || err != nil {
+				// The object no longer exists in the store and thus has been deleted.
+				deletedObject, ok := lastKnownObjects.Load(workItem.key)
+				if !ok {
+					// Object was never seen by the subscriber. Ignore the event.
+					s.wq.Done(workItem)
+					continue loop
+				}
+				event.Kind = Delete
+				event.Key = workItem.key
+				event.Object = deletedObject
+			} else {
+				lastKnownObjects.Store(workItem.key, obj)
+				event.Kind = Upsert
+				event.Key = workItem.key
+				event.Object = obj
+			}
+		default:
+			panic(fmt.Sprintf("%T: unknown work item %T", s.r, workItem))
+		}
+
+		// eventDoneSentinel is a heap-allocated object referenced by Done().
+		// If Done() is not called, a finalizer set on this object will be invoked
+		// which panics. If Done() is called, the finalizer is unset.
+		var eventDoneSentinel = new(bool)
+		event.Done = func(err error) {
+			runtime.SetFinalizer(eventDoneSentinel, nil)
+
+			if err == nil && event.Kind == Delete {
+				// Deletion processed successfully. Remove it from the set of
+				// deleted objects unless it was replaced by an upsert or newer
+				// deletion.
+				lastKnownObjects.DeleteByUID(event.Key, event.Object)
+			}
+
+			s.eventDone(workItem, err)
+
+			s.r.metricEventProcessed(event.Kind, err == nil)
+		}
+
+		// Add a finalizer to catch forgotten calls to Done().
+		runtime.SetFinalizer(eventDoneSentinel, doneFinalizer)
+
+		select {
+		case out <- event:
+		case <-ctx.Done():
+			// Subscriber cancelled or resource is shutting down. We're not requiring
+			// the subscriber to drain the channel, so we're marking the event done here
+			// and not sending it.
+			event.Done(nil)
+
+			// Drain the queue without further processing.
+			for {
+				_, shutdown := s.getWorkItem()
+				if shutdown {
+					return
+				}
+			}
+		}
+	}
+}
+
+func (s *subscriber[T]) getWorkItem() (e workItem, shutdown bool) {
+	var raw any
+	raw, shutdown = s.wq.Get()
+	if shutdown {
+		return
+	}
+	return raw.(workItem), false
+}
+
+func (s *subscriber[T]) enqueueSync() {
+	s.wq.Add(syncWorkItem{})
+}
+
+func (s *subscriber[T]) enqueueKey(key Key) {
+	s.wq.Add(keyWorkItem{key})
+}
+
+func (s *subscriber[T]) eventDone(entry workItem, err error) {
+	// This is based on the example found in k8s.io/client-go/examples/workqueue/main.go.
+
+	// Mark the object as done being processed. If it was marked dirty
+	// during processing, it'll be processed again.
+	defer s.wq.Done(entry)
+
+	if err != nil {
+		numRequeues := s.wq.NumRequeues(entry)
+
+		var action ErrorAction
+		switch entry := entry.(type) {
+		case syncWorkItem:
+			action = s.options.errorHandler(Key{}, numRequeues, err)
+		case keyWorkItem:
+			action = s.options.errorHandler(entry.key, numRequeues, err)
+		default:
+			panic(fmt.Sprintf("keyQueue: unhandled entry %T", entry))
+		}
+
+		switch action {
+		case ErrorActionRetry:
+			s.wq.AddRateLimited(entry)
+		case ErrorActionStop:
+			s.wq.ShutDown()
+		case ErrorActionIgnore:
+			s.wq.Forget(entry)
+		default:
+			panic(fmt.Sprintf("keyQueue: unknown action %q from error handler %v", action, s.options.errorHandler))
+		}
+	} else {
+		// As the object was processed successfully we can "forget" it.
+		// This clears any rate limiter state associated with this object, so
+		// it won't be throttled based on previous failure history.
+		s.wq.Forget(entry)
+	}
+}
+
+// lastKnownObjects stores the last known state of an object from a subscriber's
+// perspective. It is used to emit delete events with the last known state of
+// the object.
+type lastKnownObjects[T k8sRuntime.Object] struct {
+	mu   lock.RWMutex
+	objs map[Key]T
+}
+
+func (l *lastKnownObjects[T]) Load(key Key) (obj T, ok bool) {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+	obj, ok = l.objs[key]
+	return
+}
+
+func (l *lastKnownObjects[T]) Store(key Key, obj T) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if l.objs == nil {
+		l.objs = map[Key]T{}
+	}
+	l.objs[key] = obj
+}
+
+// DeleteByUID removes the object, but only if the UID matches. The UID
+// might not match if the object has been re-created with the same key
+// after deletion and thus Store'd again here. Once that incarnation
+// is deleted, we will be here again and the UID will match.
+func (l *lastKnownObjects[T]) DeleteByUID(key Key, objToDelete T) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	if obj, ok := l.objs[key]; ok {
+		if getUID(obj) == getUID(objToDelete) {
+			delete(l.objs, key)
+		}
+	}
+}
+
+// workItem restricts the set of types we use when type-switching over the
+// queue entries, so that we'll get a compiler error on impossible types.
+//
+// The queue entries must be kept comparable and not be pointers as we want
+// to be able to coalesce multiple keyWorkItems into a single element in the
+// queue.
+type workItem interface {
+	isWorkItem()
+}
+
+// syncWorkItem marks the store as synchronized and thus a 'Sync' event can be
+// emitted to the subscriber.
+type syncWorkItem struct{}
+
+func (syncWorkItem) isWorkItem() {}
+
+// keyWorkItem marks work for a specific key. Whether this is an upsert or delete
+// depends on the state of the store at the time this work item is processed.
+type keyWorkItem struct {
+	key Key
+}
+
+func (keyWorkItem) isWorkItem() {}
+
+type wrapperController struct {
+	cache.Controller
+	cacheMutationDetector cache.MutationDetector
+}
+
+func (p *wrapperController) Run(stopCh <-chan struct{}) {
+	go p.cacheMutationDetector.Run(stopCh)
+	p.Controller.Run(stopCh)
+}
+
+func (r *resource[T]) newInformer() (cache.Indexer, cache.Controller) {
+	clientState := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, r.opts.indexers)
+	opts := cache.DeltaFIFOOptions{KeyFunction: cache.MetaNamespaceKeyFunc, KnownObjects: clientState}
+	fifo := cache.NewDeltaFIFOWithOptions(opts)
+	transformer := r.opts.transform
+	cacheMutationDetector := cache.NewCacheMutationDetector(fmt.Sprintf("%T", r))
+	cfg := &cache.Config{
+		Queue:            fifo,
+		ListerWatcher:    r.lw,
+		ObjectType:       r.opts.sourceObj(),
+		FullResyncPeriod: 0,
+		RetryOnError:     false,
+		Process: func(obj interface{}, isInInitialList bool) error {
+			// Processing of the deltas is done under the resource mutex. This
+			// avoids emitting double events for new subscribers that list the
+			// keys in the store.
+			r.mu.RLock()
+			defer r.mu.RUnlock()
+
+			for _, d := range obj.(cache.Deltas) {
+				var obj interface{}
+				if transformer != nil {
+					var err error
+					if obj, err = transformer(d.Object); err != nil {
+						return err
+					}
+				} else {
+					obj = d.Object
+				}
+
+				// In CI we detect if the objects were modified and panic
+				// (e.g. when KUBE_CACHE_MUTATION_DETECTOR is set);
+				// this is a no-op in production environments.
+				cacheMutationDetector.AddObject(obj)
+
+				key := NewKey(obj)
+
+				switch d.Type {
+				case cache.Sync, cache.Added, cache.Updated:
+					metric := resources.MetricCreate
+					if d.Type != cache.Added {
+						metric = resources.MetricUpdate
+					}
+					r.metricEventReceived(metric, true, false)
+
+					if _, exists, err := clientState.Get(obj); err == nil && exists {
+						if err := clientState.Update(obj); err != nil {
+							return err
+						}
+					} else {
+						if err := clientState.Add(obj); err != nil {
+							return err
+						}
+					}
+
+					for _, sub := range r.subscribers {
+						sub.enqueueKey(key)
+					}
+				case cache.Deleted:
+					r.metricEventReceived(resources.MetricDelete, true, false)
+
+					if err := clientState.Delete(obj); err != nil {
+						return err
+					}
+
+					for _, sub := range r.subscribers {
+						sub.enqueueKey(key)
+					}
+				}
+			}
+			return nil
+		},
+	}
+	return clientState, &wrapperController{
+		Controller:            cache.New(cfg),
+		cacheMutationDetector: cacheMutationDetector,
+	}
+}
+
+func getUID(obj k8sRuntime.Object) types.UID {
+	meta, err := meta.Accessor(obj)
+	if err != nil {
+		// If we get here, it means the object does not implement ObjectMeta, and thus
+		// the Resource[T] has been instantiated with an unsuitable type T.
+		// As this would be caught immediately during development, panicking is
+		// the right course of action.
+		panic(fmt.Sprintf("BUG: meta.Accessor() failed on %T: %s", obj, err))
+	}
+	return meta.GetUID()
+}
+
+func merge[T any](c1, c2 <-chan T) <-chan T {
+	m := make(chan T)
+	go func() {
+		select {
+		case <-c1:
+		case <-c2:
+		}
+		close(m)
+	}()
+	return m
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/resource/scheme.go b/vendor/github.com/cilium/cilium/pkg/k8s/resource/scheme.go
new file mode 100644
index 0000000000..3c976d3b03
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/resource/scheme.go
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package resource
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+
+	cilium_api_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+	cilium_api_v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+	corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+	discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1"
+	discoveryv1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1"
+	networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
+)
+
+var scheme = runtime.NewScheme()
+
+var localSchemeBuilder = runtime.SchemeBuilder{
+	corev1.AddToScheme,
+	discoveryv1beta1.AddToScheme,
+	discoveryv1.AddToScheme,
+	networkingv1.AddToScheme,
+	cilium_api_v2.AddToScheme,
+	cilium_api_v2alpha1.AddToScheme,
+}
+
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+	utilruntime.Must(AddToScheme(scheme))
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/resource/store.go b/vendor/github.com/cilium/cilium/pkg/k8s/resource/store.go
new file mode 100644
index 0000000000..9dec4cbad0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/resource/store.go
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package resource
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	k8sRuntime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/tools/cache"
+)
+
+// Store is a read-only typed wrapper for cache.Store.
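+//
+// A minimal usage sketch (assuming pods is a started Resource[*slim_corev1.Pod]
+// and ctx is a context.Context):
+//
+//	store, err := pods.Store(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	pod, exists, err := store.GetByKey(resource.Key{Namespace: "kube-system", Name: "foo"})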
+type Store[T k8sRuntime.Object] interface {
+	// List returns all items currently in the store.
+	List() []T
+
+	// IterKeys returns a key iterator.
+	IterKeys() KeyIter
+
+	// Get returns the latest version by deriving the key from the given object.
+	Get(obj T) (item T, exists bool, err error)
+
+	// GetByKey returns the latest version of the object with given key.
+	GetByKey(key Key) (item T, exists bool, err error)
+
+	// IndexKeys returns the keys of the stored objects whose set of indexed values
+	// for the index includes the given indexed value.
+	IndexKeys(indexName, indexedValue string) ([]string, error)
+
+	// ByIndex returns the stored objects whose set of indexed values for the index
+	// includes the given indexed value.
+	ByIndex(indexName, indexedValue string) ([]T, error)
+
+	// CacheStore returns the underlying cache.Store instance. Use for temporary
+	// compatibility purposes only!
+	CacheStore() cache.Store
+
+	// Release releases the store and allows the associated resource to stop its
+	// informer if this is the last reference to it.
+	// This is a no-op if the resource is not releasable.
+	Release()
+}
+
+// typedStore implements Store on top of an untyped cache.Indexer.
+type typedStore[T k8sRuntime.Object] struct {
+	store   cache.Indexer
+	release func()
+}
+
+var _ Store[*corev1.Node] = &typedStore[*corev1.Node]{}
+
+func (s *typedStore[T]) List() []T {
+	items := s.store.List()
+	result := make([]T, len(items))
+	for i := range items {
+		result[i] = items[i].(T)
+	}
+	return result
+}
+
+func (s *typedStore[T]) IterKeys() KeyIter {
+	return &keyIterImpl{keys: s.store.ListKeys(), pos: -1}
+}
+
+func (s *typedStore[T]) Get(obj T) (item T, exists bool, err error) {
+	return s.GetByKey(NewKey(obj))
+}
+
+func (s *typedStore[T]) GetByKey(key Key) (item T, exists bool, err error) {
+	var itemAny any
+	itemAny, exists, err = s.store.GetByKey(key.String())
+	if exists {
+		item = itemAny.(T)
+	}
+	return
+}
+
+func (s *typedStore[T]) IndexKeys(indexName, indexedValue string) ([]string, error) {
+	return s.store.IndexKeys(indexName, indexedValue)
+}
+
+func (s *typedStore[T]) ByIndex(indexName, indexedValue string) ([]T, error) {
+	itemsAny, err := s.store.ByIndex(indexName, indexedValue)
+	if err != nil {
+		return nil, err
+	}
+	items := make([]T, 0, len(itemsAny))
+	for _, item := range itemsAny {
+		items = append(items, item.(T))
+	}
+	return items, nil
+}
+
+func (s *typedStore[T]) CacheStore() cache.Store {
+	return s.store
+}
+
+func (s *typedStore[T]) Release() {
+	s.release()
+}
+
+type KeyIter interface {
+	// Next returns true if there is a key, false if iteration has finished.
+	Next() bool
+	Key() Key
+}
+
+type keyIterImpl struct {
+	keys []string
+	pos  int
+}
+
+func (it *keyIterImpl) Next() bool {
+	it.pos++
+	return it.pos < len(it.keys)
+}
+
+func (it *keyIterImpl) Key() Key {
+	ns, name, _ := cache.SplitMetaNamespaceKey(it.keys[it.pos])
+	// Ignoring the error from SplitMetaNamespaceKey as the string comes from
+	// the cache.
+ return Key{Namespace: ns, Name: name} +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/resource_ctors.go b/vendor/github.com/cilium/cilium/pkg/k8s/resource_ctors.go new file mode 100644 index 0000000000..a0eb8826a2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/resource_ctors.go @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package k8s + +import ( + "fmt" + "sync" + + "github.com/spf13/pflag" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + k8sRuntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + + "github.com/cilium/cilium/pkg/hive/cell" + cilium_api_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + cilium_api_v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + "github.com/cilium/cilium/pkg/k8s/client" + "github.com/cilium/cilium/pkg/k8s/resource" + slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + slim_discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1" + slim_discoveryv1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1" + slim_networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1" + "github.com/cilium/cilium/pkg/k8s/types" + "github.com/cilium/cilium/pkg/k8s/utils" + "github.com/cilium/cilium/pkg/k8s/version" + "github.com/cilium/cilium/pkg/node" +) + +// Config defines the configuration options for k8s resources. +type Config struct { + EnableK8sEndpointSlice bool + + // K8sServiceProxyName is the value of service.kubernetes.io/service-proxy-name label, + // that identifies the service objects Cilium should handle. + // If the provided value is an empty string, Cilium will manage service objects when + // the label is not present. For more details - + // https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/2447-Make-kube-proxy-service-abstraction-optional + K8sServiceProxyName string +} + +// DefaultConfig represents the default k8s resources config values. +var DefaultConfig = Config{ + EnableK8sEndpointSlice: true, +} + +// Flags implements the cell.Flagger interface. +func (def Config) Flags(flags *pflag.FlagSet) { + flags.Bool("enable-k8s-endpoint-slice", def.EnableK8sEndpointSlice, "Enables k8s EndpointSlice feature in Cilium if the k8s cluster supports it") + flags.String("k8s-service-proxy-name", def.K8sServiceProxyName, "Value of K8s service-proxy-name label for which Cilium handles the services (empty = all services without service.kubernetes.io/service-proxy-name label)") +} + +// ServiceResource builds the Resource[Service] object. 
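+//
+// A minimal usage sketch (assuming lc and cs come from the hive):
+//
+//	svcs, err := ServiceResource(lc, DefaultConfig, cs)
+//	if err != nil {
+//		return err
+//	}
+//	// svcs is nil when the clientset is disabled; otherwise subscribers
+//	// consume it via svcs.Events(ctx) or svcs.Store(ctx).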
+func ServiceResource(lc cell.Lifecycle, cfg Config, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*slim_corev1.Service], error) { + if !cs.IsEnabled() { + return nil, nil + } + optsModifier, err := utils.GetServiceAndEndpointListOptionsModifier(cfg.K8sServiceProxyName) + if err != nil { + return nil, err + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*slim_corev1.ServiceList](cs.Slim().CoreV1().Services("")), + append(opts, optsModifier)..., + ) + return resource.New[*slim_corev1.Service](lc, lw, resource.WithMetric("Service")), nil +} + +func NodeResource(lc cell.Lifecycle, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*slim_corev1.Node], error) { + if !cs.IsEnabled() { + return nil, nil + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*slim_corev1.NodeList](cs.Slim().CoreV1().Nodes()), + opts..., + ) + return resource.New[*slim_corev1.Node](lc, lw, resource.WithMetric("Node")), nil +} + +func CiliumNodeResource(lc cell.Lifecycle, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*cilium_api_v2.CiliumNode], error) { + if !cs.IsEnabled() { + return nil, nil + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*cilium_api_v2.CiliumNodeList](cs.CiliumV2().CiliumNodes()), + opts..., + ) + return resource.New[*cilium_api_v2.CiliumNode](lc, lw, resource.WithMetric("CiliumNode")), nil +} + +func PodResource(lc cell.Lifecycle, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*slim_corev1.Pod], error) { + if !cs.IsEnabled() { + return nil, nil + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*slim_corev1.PodList](cs.Slim().CoreV1().Pods("")), + opts..., + ) + return resource.New[*slim_corev1.Pod](lc, lw, resource.WithMetric("Pod")), nil +} + +func NamespaceResource(lc cell.Lifecycle, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*slim_corev1.Namespace], error) { + if !cs.IsEnabled() { + return nil, nil + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*slim_corev1.NamespaceList](cs.Slim().CoreV1().Namespaces()), + opts..., + ) + return resource.New[*slim_corev1.Namespace](lc, lw, resource.WithMetric("Namespace")), nil +} + +func LBIPPoolsResource(lc cell.Lifecycle, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*cilium_api_v2alpha1.CiliumLoadBalancerIPPool], error) { + if !cs.IsEnabled() { + return nil, nil + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*cilium_api_v2alpha1.CiliumLoadBalancerIPPoolList](cs.CiliumV2alpha1().CiliumLoadBalancerIPPools()), + opts..., + ) + return resource.New[*cilium_api_v2alpha1.CiliumLoadBalancerIPPool](lc, lw, resource.WithMetric("CiliumLoadBalancerIPPool")), nil +} + +func CiliumIdentityResource(lc cell.Lifecycle, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*cilium_api_v2.CiliumIdentity], error) { + if !cs.IsEnabled() { + return nil, nil + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*cilium_api_v2.CiliumIdentityList](cs.CiliumV2().CiliumIdentities()), + opts..., + ) + return resource.New[*cilium_api_v2.CiliumIdentity](lc, lw, resource.WithMetric("CiliumIdentityList")), nil +} + +func NetworkPolicyResource(lc cell.Lifecycle, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*slim_networkingv1.NetworkPolicy], error) { + if !cs.IsEnabled() { + return 
nil, nil + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*slim_networkingv1.NetworkPolicyList](cs.Slim().NetworkingV1().NetworkPolicies("")), + opts..., + ) + return resource.New[*slim_networkingv1.NetworkPolicy](lc, lw, resource.WithMetric("NetworkPolicy")), nil +} + +func CiliumNetworkPolicyResource(lc cell.Lifecycle, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*cilium_api_v2.CiliumNetworkPolicy], error) { + if !cs.IsEnabled() { + return nil, nil + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*cilium_api_v2.CiliumNetworkPolicyList](cs.CiliumV2().CiliumNetworkPolicies("")), + opts..., + ) + return resource.New[*cilium_api_v2.CiliumNetworkPolicy](lc, lw, resource.WithMetric("CiliumNetworkPolicy")), nil +} + +func CiliumClusterwideNetworkPolicyResource(lc cell.Lifecycle, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*cilium_api_v2.CiliumClusterwideNetworkPolicy], error) { + if !cs.IsEnabled() { + return nil, nil + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*cilium_api_v2.CiliumClusterwideNetworkPolicyList](cs.CiliumV2().CiliumClusterwideNetworkPolicies()), + opts..., + ) + return resource.New[*cilium_api_v2.CiliumClusterwideNetworkPolicy](lc, lw, resource.WithMetric("CiliumClusterwideNetworkPolicy")), nil +} + +func CiliumCIDRGroupResource(lc cell.Lifecycle, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*cilium_api_v2alpha1.CiliumCIDRGroup], error) { + if !cs.IsEnabled() { + return nil, nil + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*cilium_api_v2alpha1.CiliumCIDRGroupList](cs.CiliumV2alpha1().CiliumCIDRGroups()), + opts..., + ) + return resource.New[*cilium_api_v2alpha1.CiliumCIDRGroup](lc, lw, resource.WithMetric("CiliumCIDRGroup")), nil +} + +func CiliumPodIPPoolResource(lc cell.Lifecycle, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*cilium_api_v2alpha1.CiliumPodIPPool], error) { + if !cs.IsEnabled() { + return nil, nil + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*cilium_api_v2alpha1.CiliumPodIPPoolList](cs.CiliumV2alpha1().CiliumPodIPPools()), + opts..., + ) + return resource.New[*cilium_api_v2alpha1.CiliumPodIPPool](lc, lw, resource.WithMetric("CiliumPodIPPool")), nil +} + +func EndpointsResource(lc cell.Lifecycle, cfg Config, cs client.Clientset) (resource.Resource[*Endpoints], error) { + if !cs.IsEnabled() { + return nil, nil + } + endpointsOptsModifier, err := utils.GetServiceAndEndpointListOptionsModifier(cfg.K8sServiceProxyName) + if err != nil { + return nil, err + } + + endpointSliceOpsModifier, err := utils.GetEndpointSliceListOptionsModifier() + if err != nil { + return nil, err + } + lw := &endpointsListerWatcher{ + cs: cs, + enableK8sEndpointSlice: cfg.EnableK8sEndpointSlice, + endpointsOptsModifier: endpointsOptsModifier, + endpointSlicesOptsModifier: endpointSliceOpsModifier, + } + return resource.New[*Endpoints]( + lc, + lw, + resource.WithLazyTransform(lw.getSourceObj, transformEndpoint), + resource.WithMetric("Endpoint"), + resource.WithName("endpoints"), + ), nil +} + +// endpointsListerWatcher implements List and Watch for endpoints/endpointslices. It +// performs the capability check on first call to List/Watch. This allows constructing +// the resource before the client has been started and capabilities have been probed. 
+type endpointsListerWatcher struct { + cs client.Clientset + enableK8sEndpointSlice bool + endpointsOptsModifier func(*metav1.ListOptions) + endpointSlicesOptsModifier func(*metav1.ListOptions) + sourceObj k8sRuntime.Object + + once sync.Once + cachedListerWatcher cache.ListerWatcher +} + +func (lw *endpointsListerWatcher) getSourceObj() k8sRuntime.Object { + lw.getListerWatcher() // force the construction + return lw.sourceObj +} + +func (lw *endpointsListerWatcher) getListerWatcher() cache.ListerWatcher { + lw.once.Do(func() { + if lw.enableK8sEndpointSlice && version.Capabilities().EndpointSlice { + if version.Capabilities().EndpointSliceV1 { + log.Info("Using discoveryv1.EndpointSlice") + lw.cachedListerWatcher = utils.ListerWatcherFromTyped[*slim_discoveryv1.EndpointSliceList]( + lw.cs.Slim().DiscoveryV1().EndpointSlices(""), + ) + lw.sourceObj = &slim_discoveryv1.EndpointSlice{} + } else { + log.Info("Using discoveryv1beta1.EndpointSlice") + lw.cachedListerWatcher = utils.ListerWatcherFromTyped[*slim_discoveryv1beta1.EndpointSliceList]( + lw.cs.Slim().DiscoveryV1beta1().EndpointSlices(""), + ) + lw.sourceObj = &slim_discoveryv1beta1.EndpointSlice{} + } + lw.cachedListerWatcher = utils.ListerWatcherWithModifier(lw.cachedListerWatcher, lw.endpointSlicesOptsModifier) + } else { + log.Info("Using v1.Endpoints") + lw.cachedListerWatcher = utils.ListerWatcherFromTyped[*slim_corev1.EndpointsList]( + lw.cs.Slim().CoreV1().Endpoints(""), + ) + lw.sourceObj = &slim_corev1.Endpoints{} + lw.cachedListerWatcher = utils.ListerWatcherWithModifier(lw.cachedListerWatcher, lw.endpointsOptsModifier) + } + }) + return lw.cachedListerWatcher +} + +func (lw *endpointsListerWatcher) List(opts metav1.ListOptions) (k8sRuntime.Object, error) { + return lw.getListerWatcher().List(opts) +} + +func (lw *endpointsListerWatcher) Watch(opts metav1.ListOptions) (watch.Interface, error) { + return lw.getListerWatcher().Watch(opts) +} + +func transformEndpoint(obj any) (any, error) { + switch obj := obj.(type) { + case *slim_corev1.Endpoints: + return ParseEndpoints(obj), nil + case *slim_discoveryv1.EndpointSlice: + return ParseEndpointSliceV1(obj), nil + case *slim_discoveryv1beta1.EndpointSlice: + return ParseEndpointSliceV1Beta1(obj), nil + default: + return nil, fmt.Errorf("%T not a known endpoint or endpoint slice object", obj) + } +} + +// CiliumSlimEndpointResource uses the "localNode" IndexFunc to build the resource indexer. +// The IndexFunc accesses the local node info to get its IP, so it depends on the local node store +// to initialize it before the first access. +// To reflect this, the node.LocalNodeStore dependency is explicitly requested in the function +// signature. 
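+//
+// A minimal sketch of querying the index (hypothetical caller holding the
+// returned resource as ceps; "localNode" is the indexer registered below):
+//
+//	store, _ := ceps.Store(ctx)
+//	local, _ := store.ByIndex("localNode", nodeIP)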
+func CiliumSlimEndpointResource(lc cell.Lifecycle, cs client.Clientset, _ *node.LocalNodeStore, opts ...func(*metav1.ListOptions)) (resource.Resource[*types.CiliumEndpoint], error) {
+	if !cs.IsEnabled() {
+		return nil, nil
+	}
+	lw := utils.ListerWatcherWithModifiers(
+		utils.ListerWatcherFromTyped[*cilium_api_v2.CiliumEndpointList](cs.CiliumV2().CiliumEndpoints(slim_corev1.NamespaceAll)),
+		opts...,
+	)
+	indexers := cache.Indexers{
+		"localNode": ciliumEndpointLocalPodIndexFunc,
+	}
+	return resource.New[*types.CiliumEndpoint](lc, lw,
+		resource.WithLazyTransform(func() runtime.Object {
+			return &cilium_api_v2.CiliumEndpoint{}
+		}, TransformToCiliumEndpoint),
+		resource.WithMetric("CiliumEndpoint"),
+		resource.WithIndexers(indexers),
+		resource.WithStoppableInformer(),
+	), nil
+}
+
+// ciliumEndpointLocalPodIndexFunc is an IndexFunc that indexes only local
+// CiliumEndpoints, by their local Node IP.
+func ciliumEndpointLocalPodIndexFunc(obj any) ([]string, error) {
+	cep, ok := obj.(*types.CiliumEndpoint)
+	if !ok {
+		return nil, fmt.Errorf("unexpected object type: %T", obj)
+	}
+	indices := []string{}
+	if cep.Networking == nil {
+		log.WithField("ciliumendpoint", cep.GetNamespace()+"/"+cep.GetName()).
+			Debug("cannot index CiliumEndpoint by node without network status")
+		return nil, nil
+	}
+	if cep.Networking.NodeIP == node.GetCiliumEndpointNodeIP() {
+		indices = append(indices, cep.Networking.NodeIP)
+	}
+	return indices, nil
+}
+
+// CiliumEndpointSliceResource uses the "localNode" IndexFunc to build the resource indexer.
+// The IndexFunc accesses the local node info to get its IP, so it depends on the local node store
+// to initialize it before the first access.
+// To reflect this, the node.LocalNodeStore dependency is explicitly requested in the function
+// signature.
+func CiliumEndpointSliceResource(lc cell.Lifecycle, cs client.Clientset, _ *node.LocalNodeStore, opts ...func(*metav1.ListOptions)) (resource.Resource[*cilium_api_v2alpha1.CiliumEndpointSlice], error) {
+	if !cs.IsEnabled() {
+		return nil, nil
+	}
+	lw := utils.ListerWatcherWithModifiers(
+		utils.ListerWatcherFromTyped[*cilium_api_v2alpha1.CiliumEndpointSliceList](cs.CiliumV2alpha1().CiliumEndpointSlices()),
+		opts...,
+	)
+	indexers := cache.Indexers{
+		"localNode": ciliumEndpointSliceLocalPodIndexFunc,
+	}
+	return resource.New[*cilium_api_v2alpha1.CiliumEndpointSlice](lc, lw,
+		resource.WithMetric("CiliumEndpointSlice"),
+		resource.WithIndexers(indexers),
+		resource.WithStoppableInformer(),
+	), nil
+}
+
+// ciliumEndpointSliceLocalPodIndexFunc is an IndexFunc that indexes CiliumEndpointSlices
+// containing endpoints whose pods are running locally on this Node.
+func ciliumEndpointSliceLocalPodIndexFunc(obj any) ([]string, error) { + ces, ok := obj.(*cilium_api_v2alpha1.CiliumEndpointSlice) + if !ok { + return nil, fmt.Errorf("unexpected object type: %T", obj) + } + indices := []string{} + for _, ep := range ces.Endpoints { + if ep.Networking.NodeIP == node.GetCiliumEndpointNodeIP() { + indices = append(indices, ep.Networking.NodeIP) + break + } + } + return indices, nil +} + +func CiliumExternalWorkloads(lc cell.Lifecycle, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*cilium_api_v2.CiliumExternalWorkload], error) { + if !cs.IsEnabled() { + return nil, nil + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*cilium_api_v2.CiliumExternalWorkloadList](cs.CiliumV2().CiliumExternalWorkloads()), + opts..., + ) + return resource.New[*cilium_api_v2.CiliumExternalWorkload](lc, lw, resource.WithMetric("CiliumExternalWorkloads")), nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/rule_translate.go b/vendor/github.com/cilium/cilium/pkg/k8s/rule_translate.go new file mode 100644 index 0000000000..d16b4ee849 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/rule_translate.go @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package k8s + +import ( + "fmt" + "net" + "net/netip" + + "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels" + "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/policy" + "github.com/cilium/cilium/pkg/policy/api" + "github.com/cilium/cilium/pkg/slices" +) + +var _ policy.Translator = RuleTranslator{} + +// RuleTranslator implements pkg/policy.Translator interface +// Translate populates/depopulates given rule with ToCIDR rules +// Based on provided service/endpoint +type RuleTranslator struct { + Service ServiceID + OldEndpoint, NewEndpoint Endpoints + ServiceLabels map[string]string +} + +// Translate calls TranslateEgress on all r.Egress rules +func (k RuleTranslator) Translate(r *api.Rule, result *policy.TranslationResult) error { + for egressIndex := range r.Egress { + err := k.TranslateEgress(&r.Egress[egressIndex], result) + if err != nil { + return err + } + } + return nil +} + +// TranslateEgress populates/depopulates egress rules with ToCIDR entries based +// on toService entries +func (k RuleTranslator) TranslateEgress(r *api.EgressRule, result *policy.TranslationResult) error { + defer r.SetAggregatedSelectors() + err := k.depopulateEgress(r, result) + if err != nil { + return err + } + + err = k.populateEgress(r, result) + if err != nil { + return err + } + + if len(result.PrefixesToAdd) > 0 || len(result.PrefixesToRelease) > 0 { + release := slices.Diff(result.PrefixesToRelease, result.PrefixesToAdd) + add := slices.Diff(result.PrefixesToAdd, result.PrefixesToRelease) + result.PrefixesToRelease = release + result.PrefixesToAdd = add + } + return nil +} + +func (k RuleTranslator) populateEgress(r *api.EgressRule, result *policy.TranslationResult) error { + for _, service := range r.ToServices { + if k.serviceMatches(service) { + if backendPrefixes, err := k.generateToCidrFromEndpoint(r, k.NewEndpoint); err != nil { + return err + } else { + result.PrefixesToAdd = append(result.PrefixesToAdd, backendPrefixes...) 
+ } + // TODO: generateToPortsFromEndpoint when ToPorts and ToCIDR are compatible + } + } + return nil +} + +func (k RuleTranslator) depopulateEgress(r *api.EgressRule, result *policy.TranslationResult) error { + for _, service := range r.ToServices { + // NumToServicesRules are only counted in depopulate to avoid + // counting rules twice + result.NumToServicesRules++ + if k.serviceMatches(service) { + if prefixesToRelease, err := k.deleteToCidrFromEndpoint(r, k.OldEndpoint); err != nil { + return err + } else { + result.PrefixesToRelease = append(result.PrefixesToRelease, prefixesToRelease...) + } + // TODO: generateToPortsFromEndpoint when ToPorts and ToCIDR are compatible + } + } + return nil +} + +func (k RuleTranslator) serviceMatches(service api.Service) bool { + if service.K8sServiceSelector != nil { + es := api.EndpointSelector(service.K8sServiceSelector.Selector) + es.SyncRequirementsWithLabelSelector() + esMatches := es.Matches(labels.Set(k.ServiceLabels)) + return esMatches && + (service.K8sServiceSelector.Namespace == k.Service.Namespace || service.K8sServiceSelector.Namespace == "") + } + + if service.K8sService != nil { + return service.K8sService.ServiceName == k.Service.Name && + (service.K8sService.Namespace == k.Service.Namespace || service.K8sService.Namespace == "") + } + + return false +} + +// generateToCidrFromEndpoint takes an egress rule and populates it with +// ToCIDR rules based on provided endpoint object +func (k RuleTranslator) generateToCidrFromEndpoint( + egress *api.EgressRule, + endpoints Endpoints, +) ([]netip.Prefix, error) { + prefixes := endpoints.Prefixes() + + // This will generate one-address CIDRs consisting of endpoint backend ip + for addrCluster := range endpoints.Backends { + epIP := addrCluster.Addr() + + found := false + for _, c := range egress.ToCIDRSet { + prefix, err := netip.ParsePrefix(string(c.Cidr)) + if err != nil { + return nil, err + } + if prefix.Contains(epIP) { + found = true + break + } + } + if !found { + mask := 32 + if epIP.Is6() { + mask = 128 + } + cidr := netip.PrefixFrom(epIP, mask) + egress.ToCIDRSet = append(egress.ToCIDRSet, api.CIDRRule{ + Cidr: api.CIDR(cidr.String()), + Generated: true, + }) + } + } + return prefixes, nil +} + +// deleteToCidrFromEndpoint takes an egress rule and removes ToCIDR rules +// matching endpoint. Returns an error if any of the backends are malformed. +// +// If all backends are valid, returns any CIDR mappings that are being removed +// from the policy. The caller must attempt to release this via the IPCache +// identity release functions. 
+func (k RuleTranslator) deleteToCidrFromEndpoint(
+	egress *api.EgressRule,
+	endpoints Endpoints,
+) ([]netip.Prefix, error) {
+
+	var toReleasePrefixes []netip.Prefix
+	delCIDRRules := make(map[int]*api.CIDRRule, len(egress.ToCIDRSet))
+
+	for addrCluster := range endpoints.Backends {
+		ipStr := addrCluster.Addr().String()
+
+		epIP := net.ParseIP(ipStr)
+		if epIP == nil {
+			return nil, fmt.Errorf("unable to parse ip: %s", ipStr)
+		}
+
+		for i, c := range egress.ToCIDRSet {
+			if _, ok := delCIDRRules[i]; ok {
+				// it's already going to be deleted so we can continue
+				continue
+			}
+			_, cidr, err := net.ParseCIDR(string(c.Cidr))
+			if err != nil {
+				return nil, err
+			}
+			// delete all generated CIDRs for a CIDR that match the given
+			// endpoint
+			if c.Generated && cidr.Contains(epIP) {
+				delCIDRRules[i] = &egress.ToCIDRSet[i]
+			}
+		}
+		if len(delCIDRRules) == len(egress.ToCIDRSet) {
+			break
+		}
+	}
+
+	// If no rules were deleted we can do an early return here and avoid doing
+	// the useless operations below.
+	if len(delCIDRRules) == 0 {
+		return toReleasePrefixes, nil
+	}
+
+	delSlice := make([]api.CIDRRule, 0, len(egress.ToCIDRSet))
+	for _, delCIDRRule := range delCIDRRules {
+		delSlice = append(delSlice, *delCIDRRule)
+	}
+	toReleasePrefixes = policy.GetPrefixesFromCIDRSet(delSlice)
+
+	// If the endpoint is not in a CIDR, or the rule was not generated, it is
+	// OK to retain it.
+	newCIDRRules := make([]api.CIDRRule, 0, len(egress.ToCIDRSet)-len(delCIDRRules))
+	for i, c := range egress.ToCIDRSet {
+		// If the rule was deleted then it shouldn't be re-added
+		if _, ok := delCIDRRules[i]; ok {
+			continue
+		}
+		newCIDRRules = append(newCIDRRules, c)
+	}
+
+	egress.ToCIDRSet = newCIDRRules
+
+	return toReleasePrefixes, nil
+}
+
+// PreprocessRules translates egress rules that apply to external services (ToServices)
+func PreprocessRules(r api.Rules, cache *ServiceCache) error {
+
+	cache.mutex.Lock()
+	defer cache.mutex.Unlock()
+
+	for _, rule := range r {
+		// Translate only handles egress rules
+		if rule.Egress == nil {
+			continue
+		}
+		for ns, ep := range cache.endpoints {
+			svc, ok := cache.services[ns]
+			// Normally, only services without a label selector (i.e. empty services)
+			// are allowed as targets of a toServices rule.
+			// This is to minimize the chances of a pod IP being selected by this rule, which might
+			// cause conflicting entries in the ipcache.
+			//
+			// This requirement, however, is dropped for HighScale IPCache mode, because pod IPs are
+			// normally excluded from the ipcache regardless.
+			if ok && (option.Config.EnableHighScaleIPcache || svc.IsExternal()) {
+				eps := ep.GetEndpoints()
+				if eps != nil {
+					t := NewK8sTranslator(ns, Endpoints{}, *eps, svc.Labels)
+					// The translation result is intentionally discarded here;
+					// PreprocessRules does not allocate or release prefixes.
+					err := t.Translate(rule, &policy.TranslationResult{})
+					if err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// NewK8sTranslator returns a RuleTranslator for the given service and endpoints.
+// Translation calls report, via the supplied policy.TranslationResult, the
+// prefixes that need to be allocated or released.
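+//
+// A minimal sketch of driving the translator (hypothetical values, mirroring
+// PreprocessRules above):
+//
+//	t := NewK8sTranslator(svcID, oldEPs, newEPs, svc.Labels)
+//	res := policy.TranslationResult{}
+//	err := t.Translate(rule, &res)
+//	// res.PrefixesToAdd / res.PrefixesToRelease carry the CIDR deltas.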
+func NewK8sTranslator( + serviceInfo ServiceID, + oldEPs, newEPs Endpoints, + labels map[string]string, +) RuleTranslator { + return RuleTranslator{ + Service: serviceInfo, + OldEndpoint: oldEPs, + NewEndpoint: newEPs, + ServiceLabels: labels, + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/service.go b/vendor/github.com/cilium/cilium/pkg/k8s/service.go new file mode 100644 index 0000000000..fa9261fd34 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/service.go @@ -0,0 +1,730 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package k8s + +import ( + "context" + "fmt" + "net" + "net/url" + "strings" + + "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + + "github.com/cilium/cilium/pkg/annotation" + "github.com/cilium/cilium/pkg/cidr" + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" + "github.com/cilium/cilium/pkg/comparator" + "github.com/cilium/cilium/pkg/datapath/types" + "github.com/cilium/cilium/pkg/ip" + slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + "github.com/cilium/cilium/pkg/k8s/utils" + "github.com/cilium/cilium/pkg/loadbalancer" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/option" + serviceStore "github.com/cilium/cilium/pkg/service/store" +) + +const ( + serviceAffinityNone = "" + serviceAffinityLocal = "local" + serviceAffinityRemote = "remote" +) + +func getAnnotationIncludeExternal(svc *slim_corev1.Service) bool { + if value, ok := annotation.Get(svc, annotation.GlobalService, annotation.GlobalServiceAlias); ok { + return strings.ToLower(value) == "true" + } + + return false +} + +func getAnnotationShared(svc *slim_corev1.Service) bool { + // The SharedService annotation is ignored if the service is not declared as global. + if !getAnnotationIncludeExternal(svc) { + return false + } + + if value, ok := annotation.Get(svc, annotation.SharedService, annotation.SharedServiceAlias); ok { + return strings.ToLower(value) == "true" + } + + // A global service is marked as shared by default. + return true +} + +func getAnnotationServiceAffinity(svc *slim_corev1.Service) string { + // The ServiceAffinity annotation is ignored if the service is not declared as global. + if !getAnnotationIncludeExternal(svc) { + return serviceAffinityNone + } + + if value, ok := annotation.Get(svc, annotation.ServiceAffinity, annotation.ServiceAffinityAlias); ok { + return strings.ToLower(value) + } + + return serviceAffinityNone +} + +func getAnnotationTopologyAwareHints(svc *slim_corev1.Service) bool { + // v1.DeprecatedAnnotationTopologyAwareHints has precedence over v1.AnnotationTopologyMode. + value, ok := svc.ObjectMeta.Annotations[v1.DeprecatedAnnotationTopologyAwareHints] + if !ok { + value = svc.ObjectMeta.Annotations[v1.AnnotationTopologyMode] + } + return strings.ToLower(value) == "auto" +} + +// isValidServiceFrontendIP returns true if the provided service frontend IP address type +// is supported in cilium configuration. 
+func isValidServiceFrontendIP(netIP net.IP) bool { + if (option.Config.EnableIPv4 && ip.IsIPv4(netIP)) || (option.Config.EnableIPv6 && ip.IsIPv6(netIP)) { + return true + } + + return false +} + +// ParseServiceID parses a Kubernetes service and returns the ServiceID +func ParseServiceID(svc *slim_corev1.Service) ServiceID { + return ServiceID{ + Name: svc.ObjectMeta.Name, + Namespace: svc.ObjectMeta.Namespace, + } +} + +// ParseService parses a Kubernetes service and returns a Service. +func ParseService(svc *slim_corev1.Service, nodeAddressing types.NodeAddressing) (ServiceID, *Service) { + scopedLog := log.WithFields(logrus.Fields{ + logfields.K8sSvcName: svc.ObjectMeta.Name, + logfields.K8sNamespace: svc.ObjectMeta.Namespace, + logfields.K8sAPIVersion: svc.TypeMeta.APIVersion, + logfields.K8sSvcType: svc.Spec.Type, + }) + var loadBalancerIPs []string + + svcID := ParseServiceID(svc) + + var svcType loadbalancer.SVCType + switch svc.Spec.Type { + case slim_corev1.ServiceTypeClusterIP: + svcType = loadbalancer.SVCTypeClusterIP + + case slim_corev1.ServiceTypeNodePort: + svcType = loadbalancer.SVCTypeNodePort + + case slim_corev1.ServiceTypeLoadBalancer: + svcType = loadbalancer.SVCTypeLoadBalancer + + case slim_corev1.ServiceTypeExternalName: + // External-name services must be ignored + return ServiceID{}, nil + + default: + scopedLog.Warn("Ignoring k8s service: unsupported type") + return ServiceID{}, nil + } + + if svc.Spec.ClusterIP == "" && (!option.Config.EnableNodePort || len(svc.Spec.ExternalIPs) == 0) { + return ServiceID{}, nil + } + + var clusterIPs []net.IP + if len(svc.Spec.ClusterIPs) == 0 { + if clsIP := net.ParseIP(svc.Spec.ClusterIP); clsIP != nil { + clusterIPs = []net.IP{clsIP} + } + } else { + // Here we assume that the value of .spec.ClusterIPs[0] is same as that of the .spec.clusterIP + // or else Kubernetes will reject the service with validation error. 
+		for _, ip := range svc.Spec.ClusterIPs {
+			if parsedIP := net.ParseIP(ip); parsedIP != nil {
+				clusterIPs = append(clusterIPs, parsedIP)
+			}
+		}
+	}
+
+	headless := false
+	if strings.ToLower(svc.Spec.ClusterIP) == "none" {
+		headless = true
+	}
+
+	var extTrafficPolicy loadbalancer.SVCTrafficPolicy
+	switch svc.Spec.ExternalTrafficPolicy {
+	case slim_corev1.ServiceExternalTrafficPolicyLocal:
+		extTrafficPolicy = loadbalancer.SVCTrafficPolicyLocal
+	default:
+		extTrafficPolicy = loadbalancer.SVCTrafficPolicyCluster
+	}
+
+	var intTrafficPolicy loadbalancer.SVCTrafficPolicy
+	if svc.Spec.InternalTrafficPolicy != nil && *svc.Spec.InternalTrafficPolicy == slim_corev1.ServiceInternalTrafficPolicyLocal {
+		intTrafficPolicy = loadbalancer.SVCTrafficPolicyLocal
+	} else {
+		intTrafficPolicy = loadbalancer.SVCTrafficPolicyCluster
+	}
+
+	for _, ip := range svc.Status.LoadBalancer.Ingress {
+		if ip.IP != "" {
+			loadBalancerIPs = append(loadBalancerIPs, ip.IP)
+		}
+	}
+	lbSrcRanges := make([]string, 0, len(svc.Spec.LoadBalancerSourceRanges))
+	for _, cidrString := range svc.Spec.LoadBalancerSourceRanges {
+		cidrStringTrimmed := strings.TrimSpace(cidrString)
+		lbSrcRanges = append(lbSrcRanges, cidrStringTrimmed)
+	}
+
+	svcInfo := NewService(clusterIPs, svc.Spec.ExternalIPs, loadBalancerIPs,
+		lbSrcRanges, headless, extTrafficPolicy, intTrafficPolicy,
+		uint16(svc.Spec.HealthCheckNodePort), svc.Labels, svc.Spec.Selector,
+		svc.GetNamespace(), svcType)
+
+	svcInfo.IncludeExternal = getAnnotationIncludeExternal(svc)
+	svcInfo.Shared = getAnnotationShared(svc)
+	svcInfo.ServiceAffinity = getAnnotationServiceAffinity(svc)
+
+	if svc.Spec.SessionAffinity == slim_corev1.ServiceAffinityClientIP {
+		svcInfo.SessionAffinity = true
+		if cfg := svc.Spec.SessionAffinityConfig; cfg != nil && cfg.ClientIP != nil && cfg.ClientIP.TimeoutSeconds != nil {
+			svcInfo.SessionAffinityTimeoutSec = uint32(*cfg.ClientIP.TimeoutSeconds)
+		}
+		if svcInfo.SessionAffinityTimeoutSec == 0 {
+			svcInfo.SessionAffinityTimeoutSec = uint32(v1.DefaultClientIPServiceAffinitySeconds)
+		}
+	}
+
+	for _, port := range svc.Spec.Ports {
+		p := loadbalancer.NewL4Addr(loadbalancer.L4Type(port.Protocol), uint16(port.Port))
+		portName := loadbalancer.FEPortName(port.Name)
+		if _, ok := svcInfo.Ports[portName]; !ok {
+			svcInfo.Ports[portName] = p
+		}
+		// TODO(brb) Get rid of this hack by moving the creation of surrogate
+		// frontends to pkg/service.
+		//
+		// This is a hack;-( In the case of NodePort service, we need to create
+		// surrogate frontends per IP protocol - one with a zero IP addr and
+		// one per each public iface IP addr.
+		if svc.Spec.Type == slim_corev1.ServiceTypeNodePort || svc.Spec.Type == slim_corev1.ServiceTypeLoadBalancer {
+			if option.Config.EnableNodePort && nodeAddressing != nil {
+				proto := loadbalancer.L4Type(port.Protocol)
+				port := uint16(port.NodePort)
+				// This can happen if the service type is NodePort/LoadBalancer but the upstream apiserver
+				// did not assign any NodePort to the service port field.
+				// For example if `allocateLoadBalancerNodePorts` is set to false in the service
+				// spec. 
For more details see - + // https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/1864-disable-lb-node-ports + if port == uint16(0) { + continue + } + id := loadbalancer.ID(0) // will be allocated by k8s_watcher + + if _, ok := svcInfo.NodePorts[portName]; !ok { + svcInfo.NodePorts[portName] = + make(map[string]*loadbalancer.L3n4AddrID) + } + + if option.Config.EnableIPv4 && + utils.GetClusterIPByFamily(slim_corev1.IPv4Protocol, svc) != "" { + + for _, ip := range nodeAddressing.IPv4().LoadBalancerNodeAddresses() { + nodePortFE := loadbalancer.NewL3n4AddrID(proto, cmtypes.MustAddrClusterFromIP(ip), port, + loadbalancer.ScopeExternal, id) + svcInfo.NodePorts[portName][nodePortFE.String()] = nodePortFE + } + } + if option.Config.EnableIPv6 && + utils.GetClusterIPByFamily(slim_corev1.IPv6Protocol, svc) != "" { + + for _, ip := range nodeAddressing.IPv6().LoadBalancerNodeAddresses() { + nodePortFE := loadbalancer.NewL3n4AddrID(proto, cmtypes.MustAddrClusterFromIP(ip), port, + loadbalancer.ScopeExternal, id) + svcInfo.NodePorts[portName][nodePortFE.String()] = nodePortFE + } + } + } + } + } + + svcInfo.TopologyAware = getAnnotationTopologyAwareHints(svc) + + return svcID, svcInfo +} + +// ServiceID identifies the Kubernetes service +type ServiceID struct { + Cluster string `json:"cluster,omitempty"` + Name string `json:"serviceName,omitempty"` + Namespace string `json:"namespace,omitempty"` +} + +// String returns the string representation of a service ID +func (s ServiceID) String() string { + if s.Cluster != "" { + return fmt.Sprintf("%s/%s/%s", s.Cluster, s.Namespace, s.Name) + } + return fmt.Sprintf("%s/%s", s.Namespace, s.Name) +} + +// EndpointSliceID identifies a Kubernetes EndpointSlice as well as the legacy +// v1.Endpoints. +type EndpointSliceID struct { + ServiceID + EndpointSliceName string +} + +// ParseServiceIDFrom returns a ServiceID derived from the given kubernetes +// service FQDN. +func ParseServiceIDFrom(dn string) *ServiceID { + // typical service name "cilium-etcd-client.kube-system.svc" + idx1 := strings.IndexByte(dn, '.') + if idx1 >= 0 { + svc := ServiceID{ + Name: dn[:idx1], + } + idx2 := strings.IndexByte(dn[idx1+1:], '.') + if idx2 >= 0 { + // "cilium-etcd-client.kube-system.svc" + // ^idx1+1 ^ idx1+1+idx2 + svc.Namespace = dn[idx1+1 : idx1+1+idx2] + } else { + // "cilium-etcd-client.kube-system" + // ^idx1+1 + svc.Namespace = dn[idx1+1:] + } + return &svc + } + return nil +} + +// +deepequal-gen=true +type NodePortToFrontend map[string]*loadbalancer.L3n4AddrID + +// Service is an abstraction for a k8s service that is composed by the frontend IP +// addresses (FEIPs) and the map of the frontend ports (Ports). +// +// +k8s:deepcopy-gen=true +// +deepequal-gen=true +// +deepequal-gen:private-method=true +type Service struct { + // Until deepequal-gen adds support for net.IP we need to compare this field + // manually. + // Whenever creating a new service we should make sure that the FrontendIPs are + // sorted, so we always generate the same string representation. + // +deepequal-gen=false + FrontendIPs []net.IP + IsHeadless bool + + // IncludeExternal is true when external endpoints from other clusters + // should be included + IncludeExternal bool + + // Shared is true when the service should be exposed/shared to other clusters + Shared bool + + // ServiceAffinity determines the preferred endpoint destination (e.g. local + // vs remote clusters) + // + // Applicable values: local, remote, none (default). 
+	ServiceAffinity string
+
+	// ExtTrafficPolicy controls how backends are selected for North-South traffic.
+	// If set to "Local", only node-local backends are chosen.
+	ExtTrafficPolicy loadbalancer.SVCTrafficPolicy
+
+	// IntTrafficPolicy controls how backends are selected for East-West traffic.
+	// If set to "Local", only node-local backends are chosen.
+	IntTrafficPolicy loadbalancer.SVCTrafficPolicy
+
+	// HealthCheckNodePort defines on which port the node runs an HTTP health
+	// check server which may be used by external loadbalancers to determine
+	// if a node has local backends. This will only have effect if both
+	// LoadBalancerIPs is not empty and ExtTrafficPolicy is SVCTrafficPolicyLocal.
+	HealthCheckNodePort uint16
+
+	Ports map[loadbalancer.FEPortName]*loadbalancer.L4Addr
+	// NodePorts stores mapping for port name => NodePort frontend addr string =>
+	// NodePort frontend addr. The string addr => addr indirection is to avoid
+	// storing duplicates.
+	NodePorts map[loadbalancer.FEPortName]NodePortToFrontend
+	// K8sExternalIPs stores the externalIPs of the service, mapping their
+	// string representation to the parsed net.IP.
+	//
+	// Until deepequal-gen adds support for net.IP we need to compare this field
+	// manually.
+	// +deepequal-gen=false
+	K8sExternalIPs map[string]net.IP
+
+	// LoadBalancerIPs stores LB IPs assigned to the service (string(IP) => IP).
+	//
+	// Until deepequal-gen adds support for net.IP we need to compare this field
+	// manually.
+	// +deepequal-gen=false
+	LoadBalancerIPs          map[string]net.IP
+	LoadBalancerSourceRanges map[string]*cidr.CIDR
+
+	Labels   map[string]string
+	Selector map[string]string
+
+	// SessionAffinity denotes whether service has the clientIP session affinity
+	SessionAffinity bool
+	// SessionAffinityTimeoutSec denotes the session affinity timeout in seconds
+	SessionAffinityTimeoutSec uint32
+
+	// Type is the internal service type
+	// +deepequal-gen=false
+	Type loadbalancer.SVCType
+
+	// TopologyAware denotes whether service endpoints might have topology aware
+	// hints
+	TopologyAware bool
+}
+
+// DeepEqual returns true if s and other are deeply equal.
+func (s *Service) DeepEqual(other *Service) bool { + if s == nil { + return other == nil + } + + if !s.deepEqual(other) { + return false + } + + if !ip.UnsortedIPListsAreEqual(s.FrontendIPs, other.FrontendIPs) { + return false + } + + if ((s.K8sExternalIPs != nil) && (other.K8sExternalIPs != nil)) || ((s.K8sExternalIPs == nil) != (other.K8sExternalIPs == nil)) { + in, other := s.K8sExternalIPs, other.K8sExternalIPs + if other == nil { + return false + } + + if len(in) != len(other) { + return false + } + for key, inValue := range in { + otherValue, present := other[key] + if !present { + return false + } + if !inValue.Equal(otherValue) { + return false + } + } + } + + if ((s.LoadBalancerIPs != nil) && (other.LoadBalancerIPs != nil)) || ((s.LoadBalancerIPs == nil) != (other.LoadBalancerIPs == nil)) { + in, other := s.LoadBalancerIPs, other.LoadBalancerIPs + if other == nil { + return false + } + + if len(in) != len(other) { + return false + } + for key, inValue := range in { + otherValue, present := other[key] + if !present { + return false + } + if !inValue.Equal(otherValue) { + return false + } + } + } + + return true +} + +// String returns the string representation of a service resource +func (s *Service) String() string { + if s == nil { + return "nil" + } + + ports := make([]string, len(s.Ports)) + i := 0 + for p := range s.Ports { + ports[i] = string(p) + i++ + } + + return fmt.Sprintf("frontends:%s/ports=%s/selector=%v", s.FrontendIPs, ports, s.Selector) +} + +// IsExternal returns true if the service is expected to serve out-of-cluster endpoints: +func (s Service) IsExternal() bool { + return len(s.Selector) == 0 +} + +func parseIPs(externalIPs []string) map[string]net.IP { + m := map[string]net.IP{} + for _, externalIP := range externalIPs { + ip := net.ParseIP(externalIP) + if ip != nil { + m[externalIP] = ip + } + } + return m +} + +// NewService returns a new Service with the Ports map initialized. +func NewService(ips []net.IP, externalIPs, loadBalancerIPs, loadBalancerSourceRanges []string, + headless bool, extTrafficPolicy, intTrafficPolicy loadbalancer.SVCTrafficPolicy, + healthCheckNodePort uint16, labels, selector map[string]string, + namespace string, svcType loadbalancer.SVCType) *Service { + + var ( + k8sExternalIPs map[string]net.IP + k8sLoadBalancerIPs map[string]net.IP + ) + + loadBalancerSourceCIDRs := make(map[string]*cidr.CIDR, len(loadBalancerSourceRanges)) + + for _, cidrString := range loadBalancerSourceRanges { + cidr, _ := cidr.ParseCIDR(cidrString) + loadBalancerSourceCIDRs[cidr.String()] = cidr + } + + // If EnableNodePort is not true we do not want to process + // events which only differ in external or load balancer IPs. + // By omitting these IPs in the returned Service object, they + // are no longer considered in equality checks and thus save + // CPU cycles processing events Cilium will not act upon. + if option.Config.EnableNodePort { + k8sExternalIPs = parseIPs(externalIPs) + k8sLoadBalancerIPs = parseIPs(loadBalancerIPs) + } else if option.Config.BGPAnnounceLBIP { + // The BGP LB Announcement feature requires that + // loadBalancerIPs be parsed. This is because + // an event must occur when a Service's Status field + // is updated with a new Ingress, ultimately triggering a + // BGP announcement. If we do not parse loadBalancerIPs + // this will not occur. 
+		k8sLoadBalancerIPs = parseIPs(loadBalancerIPs)
+	}
+
+	ip.SortIPList(ips)
+	return &Service{
+		FrontendIPs: ips,
+
+		IsHeadless:          headless,
+		ExtTrafficPolicy:    extTrafficPolicy,
+		IntTrafficPolicy:    intTrafficPolicy,
+		HealthCheckNodePort: healthCheckNodePort,
+
+		Ports:                    map[loadbalancer.FEPortName]*loadbalancer.L4Addr{},
+		NodePorts:                map[loadbalancer.FEPortName]NodePortToFrontend{},
+		K8sExternalIPs:           k8sExternalIPs,
+		LoadBalancerIPs:          k8sLoadBalancerIPs,
+		LoadBalancerSourceRanges: loadBalancerSourceCIDRs,
+
+		Labels:   labels,
+		Selector: selector,
+		Type:     svcType,
+	}
+}
+
+// UniquePorts returns a map of all unique ports configured in the service
+func (s *Service) UniquePorts() map[uint16]bool {
+	// We are not discriminating between the different L4 protocols on the
+	// same L4 port, so the returned set is keyed by port number alone.
+	uniqPorts := map[uint16]bool{}
+	for _, p := range s.Ports {
+		uniqPorts[p.Port] = true
+	}
+	return uniqPorts
+}
+
+// NewClusterService returns the serviceStore.ClusterService representing a
+// Kubernetes Service
+func NewClusterService(id ServiceID, k8sService *Service, k8sEndpoints *Endpoints) serviceStore.ClusterService {
+	svc := serviceStore.NewClusterService(id.Name, id.Namespace)
+
+	for key, value := range k8sService.Labels {
+		svc.Labels[key] = value
+	}
+
+	for key, value := range k8sService.Selector {
+		svc.Selector[key] = value
+	}
+
+	portConfig := serviceStore.PortConfiguration{}
+	for portName, port := range k8sService.Ports {
+		portConfig[string(portName)] = port
+	}
+
+	svc.Frontends = map[string]serviceStore.PortConfiguration{}
+	for _, feIP := range k8sService.FrontendIPs {
+		svc.Frontends[feIP.String()] = portConfig
+	}
+
+	svc.Backends = map[string]serviceStore.PortConfiguration{}
+	for addrCluster, backend := range k8sEndpoints.Backends {
+		svc.Backends[addrCluster.Addr().String()] = backend.Ports
+	}
+
+	svc.Shared = k8sService.Shared
+	svc.IncludeExternal = k8sService.IncludeExternal
+
+	return svc
+}
+
+// ParseClusterService parses a ClusterService and returns a Service.
+// ClusterService is a subset of what a Service can express;
+// in particular, ClusterService does not have:
+// - other service types than ClusterIP
+// - an explicit traffic policy, SVCTrafficPolicyCluster is assumed
+// - health check node ports
+// - NodePorts
+// - external IPs
+// - LoadBalancerIPs
+// - LoadBalancerSourceRanges
+// - SessionAffinity
+//
+// ParseClusterService() is paired with EqualsClusterService() that
+// has the above wired in.
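+//
+// A hedged sketch of the intended pairing (hypothetical caller-side cache of
+// parsed services):
+//
+//	if cur, ok := services[id]; !ok || !cur.EqualsClusterService(cs) {
+//		services[id] = ParseClusterService(cs)
+//	}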
+func ParseClusterService(svc *serviceStore.ClusterService) *Service {
+	svcInfo := &Service{
+		IsHeadless:       len(svc.Frontends) == 0,
+		IncludeExternal:  true,
+		Shared:           true,
+		ExtTrafficPolicy: loadbalancer.SVCTrafficPolicyCluster,
+		IntTrafficPolicy: loadbalancer.SVCTrafficPolicyCluster,
+		Ports:            map[loadbalancer.FEPortName]*loadbalancer.L4Addr{},
+		Labels:           svc.Labels,
+		Selector:         svc.Selector,
+		Type:             loadbalancer.SVCTypeClusterIP,
+	}
+
+	feIPs := make([]net.IP, len(svc.Frontends))
+	i := 0
+	for ipStr, ports := range svc.Frontends {
+		feIPs[i] = net.ParseIP(ipStr)
+		for name, port := range ports {
+			p := loadbalancer.NewL4Addr(loadbalancer.L4Type(port.Protocol), uint16(port.Port))
+			portName := loadbalancer.FEPortName(name)
+			if _, ok := svcInfo.Ports[portName]; !ok {
+				svcInfo.Ports[portName] = p
+			}
+		}
+		i++
+	}
+	ip.SortIPList(feIPs)
+	svcInfo.FrontendIPs = feIPs
+
+	return svcInfo
+}
+
+// EqualsClusterService returns true if the given ClusterService would parse into an
+// equal Service when ParseClusterService() is called. This is necessary to avoid memory
+// allocations that would be performed by ParseClusterService() when the service already exists.
+func (s *Service) EqualsClusterService(svc *serviceStore.ClusterService) bool {
+	switch {
+	case (s == nil) != (svc == nil):
+		return false
+	case (s == nil) && (svc == nil):
+		return true
+	}
+
+	feIPs := make([]net.IP, len(svc.Frontends))
+	fePorts := serviceStore.PortConfiguration{}
+	i := 0
+	for ipStr, ports := range svc.Frontends {
+		feIPs[i] = net.ParseIP(ipStr)
+		for name, port := range ports {
+			if _, ok := fePorts[name]; !ok {
+				fePorts[name] = port
+			}
+		}
+		i++
+	}
+
+	// These comparisons must match the ParseClusterService() function above.
+	if ip.UnsortedIPListsAreEqual(s.FrontendIPs, feIPs) &&
+		s.IsHeadless == (len(svc.Frontends) == 0) &&
+		s.IncludeExternal &&
+		s.Shared &&
+		s.ExtTrafficPolicy == loadbalancer.SVCTrafficPolicyCluster &&
+		s.IntTrafficPolicy == loadbalancer.SVCTrafficPolicyCluster &&
+		s.HealthCheckNodePort == 0 &&
+		len(s.NodePorts) == 0 &&
+		len(s.K8sExternalIPs) == 0 &&
+		len(s.LoadBalancerIPs) == 0 &&
+		len(s.LoadBalancerSourceRanges) == 0 &&
+		comparator.MapStringEquals(s.Labels, svc.Labels) &&
+		comparator.MapStringEquals(s.Selector, svc.Selector) &&
+		!s.SessionAffinity &&
+		s.SessionAffinityTimeoutSec == 0 &&
+		s.Type == loadbalancer.SVCTypeClusterIP {
+
+		if ((s.Ports == nil) != (fePorts == nil)) ||
+			len(s.Ports) != len(fePorts) {
+			return false
+		}
+		for portName, port := range s.Ports {
+			oPort, ok := fePorts[string(portName)]
+			if !ok {
+				return false
+			}
+			if port.Protocol != oPort.Protocol || port.Port != oPort.Port {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+type ServiceIPGetter interface {
+	GetServiceIP(svcID ServiceID) *loadbalancer.L3n4Addr
+}
+
+// CreateCustomDialer returns a custom dialer that picks the service IP,
+// from the given ServiceIPGetter, if the address used to dial is a k8s
+// service. If verboseLogs is set, a log message is output when the
+// address to service IP translation fails.
+func CreateCustomDialer(b ServiceIPGetter, log logrus.FieldLogger, verboseLogs bool) func(ctx context.Context, addr string) (conn net.Conn, e error) {
+	return func(ctx context.Context, s string) (conn net.Conn, e error) {
+		// If the service is available, do the service translation to
+		// the service IP. Otherwise dial with the original service
+		// name `s`.
+		u, err := url.Parse(s)
+		if err == nil {
+			var svc *ServiceID
+			// In etcd v3.5.0, 's' doesn't contain the URL Scheme and the u.Host
+			// will be empty because url.Parse will consider the "host" as the
+			// url Scheme. If 's' doesn't contain the URL Scheme then we will be
+			// able to parse the service ID directly from it without the need
+			// to do url.Parse.
+			if u.Host != "" {
+				svc = ParseServiceIDFrom(u.Host)
+			} else {
+				svc = ParseServiceIDFrom(s)
+			}
+			if svc != nil {
+				svcIP := b.GetServiceIP(*svc)
+				if svcIP != nil {
+					s = svcIP.String()
+				} else if verboseLogs {
+					log.Debug("Service not found in the service IP getter")
+				}
+			} else if verboseLogs {
+				log.WithFields(logrus.Fields{
+					"url-host": u.Host,
+					"url":      s,
+				}).Debug("Unable to parse etcd service URL into a service ID")
+			}
+		} else if verboseLogs {
+			log.WithError(err).Error("Unable to parse etcd service URL")
+		}
+
+		log.Debugf("Custom dialer based on k8s service backend is dialing to %q", s)
+		return (&net.Dialer{}).DialContext(ctx, "tcp", s)
+	}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/service_cache.go b/vendor/github.com/cilium/cilium/pkg/k8s/service_cache.go
new file mode 100644
index 0000000000..4f0666fd32
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/service_cache.go
@@ -0,0 +1,841 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package k8s
+
+import (
+	"context"
+	"net"
+	"slices"
+	"sync"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/sirupsen/logrus"
+	"github.com/spf13/pflag"
+	core_v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
+	"github.com/cilium/cilium/pkg/datapath/types"
+	"github.com/cilium/cilium/pkg/hive/cell"
+	"github.com/cilium/cilium/pkg/ip"
+	slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+	"github.com/cilium/cilium/pkg/loadbalancer"
+	"github.com/cilium/cilium/pkg/lock"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	"github.com/cilium/cilium/pkg/node"
+	"github.com/cilium/cilium/pkg/option"
+	serviceStore "github.com/cilium/cilium/pkg/service/store"
+)
+
+// ServiceCacheCell initializes the service cache, which holds the list of
+// known services correlated with the matching endpoints.
+var ServiceCacheCell = cell.Module(
+	"service-cache",
+	"Service Cache",
+
+	cell.Config(ServiceCacheConfig{}),
+	cell.Provide(newServiceCache),
+)
+
+// ServiceCacheConfig defines the configuration options for the service cache.
+type ServiceCacheConfig struct {
+	EnableServiceTopology bool
+}
+
+// Flags implements the cell.Flagger interface.
+func (def ServiceCacheConfig) Flags(flags *pflag.FlagSet) {
+	flags.Bool("enable-service-topology", def.EnableServiceTopology, "Enable support for service topology aware hints")
+}
+
+// CacheAction is the type of action that was performed on the cache
+type CacheAction int
+
+const (
+	// UpdateService reflects that the service was updated or added
+	UpdateService CacheAction = iota
+
+	// DeleteService reflects that the service was deleted
+	DeleteService
+)
+
+// String returns the cache action as a string
+func (c CacheAction) String() string {
+	switch c {
+	case UpdateService:
+		return "service-updated"
+	case DeleteService:
+		return "service-deleted"
+	default:
+		return "unknown"
+	}
+}
+
+// ServiceEvent is emitted via the Events channel of ServiceCache and describes
+// the change that occurred in the cache
+type ServiceEvent struct {
+	// Action is the action that was performed in the cache
+	Action CacheAction
+
+	// ID is the identifier of the service
+	ID ServiceID
+
+	// Service is the service structure
+	Service *Service
+
+	// OldService is the old service structure
+	OldService *Service
+
+	// Endpoints is the endpoints structure correlated with the service
+	Endpoints *Endpoints
+
+	// OldEndpoints is the old endpoints structure.
+	OldEndpoints *Endpoints
+
+	// SWG provides a mechanism to detect if a service was synchronized with
+	// the datapath.
+	SWG *lock.StoppableWaitGroup
+}
+
+// ServiceCache is a list of services correlated with the matching endpoints.
+// The Events member receives an event whenever a service or its endpoints
+// change in the cache.
+type ServiceCache struct {
+	config ServiceCacheConfig
+
+	Events chan ServiceEvent
+
+	// mutex protects the maps below including the concurrent access of each
+	// value.
+	mutex    lock.RWMutex
+	services map[ServiceID]*Service
+	// endpoints maps a service to a map of EndpointSlices. In case the cluster
+	// is still using the v1.Endpoints, the key used in the internal map of
+	// EndpointSlices is the v1.Endpoint name.
+	endpoints map[ServiceID]*EndpointSlices
+
+	// externalEndpoints is a list of additional service backends derived from
+	// sources other than the local cluster
+	externalEndpoints map[ServiceID]externalEndpoints
+
+	nodeAddressing types.NodeAddressing
+
+	selfNodeZoneLabel string
+
+	ServiceMutators []func(svc *slim_corev1.Service, svcInfo *Service)
+}
+
+// NewServiceCache returns a new ServiceCache
+func NewServiceCache(nodeAddressing types.NodeAddressing) *ServiceCache {
+	return &ServiceCache{
+		services:          map[ServiceID]*Service{},
+		endpoints:         map[ServiceID]*EndpointSlices{},
+		externalEndpoints: map[ServiceID]externalEndpoints{},
+		Events:            make(chan ServiceEvent, option.Config.K8sServiceCacheSize),
+		nodeAddressing:    nodeAddressing,
+	}
+}
+
+func newServiceCache(lc cell.Lifecycle, nodeAddressing types.NodeAddressing, cfg ServiceCacheConfig, lns *node.LocalNodeStore) *ServiceCache {
+	sc := NewServiceCache(nodeAddressing)
+	sc.config = cfg
+
+	var wg sync.WaitGroup
+	ctx, cancel := context.WithCancel(context.Background())
+	lc.Append(cell.Hook{
+		OnStart: func(hc cell.HookContext) error {
+			if !cfg.EnableServiceTopology {
+				return nil
+			}
+
+			// Explicitly get the labels in addition to registering the observer,
+			// as otherwise we wouldn't block until the first event is observed.
+			ln, err := lns.Get(hc)
+			sc.updateSelfNodeLabels(ln.Labels)
+
+			wg.Add(1)
+			lns.Observe(ctx, func(ln node.LocalNode) {
+				sc.updateSelfNodeLabels(ln.Labels)
+			}, func(error) { wg.Done() })
+
+			return err
+		},
+		OnStop: func(hc cell.HookContext) error {
+			cancel()
+			wg.Wait()
+			return nil
+		},
+	})
+
+	return sc
+}
+
+// GetServiceIP returns a random L3n4Addr that is backing the given Service ID.
+// The returned IP is with external scope since its string representation might
+// be used for a net.Dialer.
+func (s *ServiceCache) GetServiceIP(svcID ServiceID) *loadbalancer.L3n4Addr {
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+	svc := s.services[svcID]
+	if svc == nil || len(svc.FrontendIPs) == 0 || len(svc.Ports) == 0 {
+		return nil
+	}
+
+	feIP := ip.GetIPFromListByFamily(svc.FrontendIPs, option.Config.EnableIPv4)
+	if feIP == nil {
+		return nil
+	}
+
+	for _, port := range svc.Ports {
+		return loadbalancer.NewL3n4Addr(port.Protocol, cmtypes.MustAddrClusterFromIP(feIP), port.Port,
+			loadbalancer.ScopeExternal)
+	}
+	return nil
+}
+
+// GetServiceFrontendIP returns the frontend IP (aka clusterIP) for the given service with type.
+func (s *ServiceCache) GetServiceFrontendIP(svcID ServiceID, svcType loadbalancer.SVCType) net.IP {
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+	svc := s.services[svcID]
+	if svc == nil || svc.Type != svcType || len(svc.FrontendIPs) == 0 {
+		return nil
+	}
+
+	return ip.GetIPFromListByFamily(svc.FrontendIPs, option.Config.EnableIPv4)
+}
+
+// GetServiceAddrsWithType returns a map of all the ports and a slice of L3n4Addr that are backing the
+// given Service ID with the given type. It also returns the number of frontend IPs associated with the service.
+// Note: The returned IPs are with External scope.
+func (s *ServiceCache) GetServiceAddrsWithType(svcID ServiceID,
+	svcType loadbalancer.SVCType) (map[loadbalancer.FEPortName][]*loadbalancer.L3n4Addr, int) {
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+	svc := s.services[svcID]
+	if svc == nil || svc.Type != svcType || len(svc.FrontendIPs) == 0 {
+		return nil, 0
+	}
+
+	addrsByPort := make(map[loadbalancer.FEPortName][]*loadbalancer.L3n4Addr)
+	for pName, l4Addr := range svc.Ports {
+		addrs := make([]*loadbalancer.L3n4Addr, 0, len(svc.FrontendIPs))
+		for _, feIP := range svc.FrontendIPs {
+			if isValidServiceFrontendIP(feIP) {
+				addrs = append(addrs, loadbalancer.NewL3n4Addr(l4Addr.Protocol, cmtypes.MustAddrClusterFromIP(feIP), l4Addr.Port, loadbalancer.ScopeExternal))
+			}
+		}
+
+		addrsByPort[pName] = addrs
+	}
+
+	return addrsByPort, len(svc.FrontendIPs)
+}
+
+// GetEndpointsOfService returns all the endpoints that correlate with a
+// service given a ServiceID.
+func (s *ServiceCache) GetEndpointsOfService(svcID ServiceID) *Endpoints {
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+	eps, ok := s.endpoints[svcID]
+	if !ok {
+		return nil
+	}
+	return eps.GetEndpoints()
+}
+
+// GetNodeAddressing returns the registered node addresses to this service cache.
+func (s *ServiceCache) GetNodeAddressing() types.NodeAddressing {
+	return s.nodeAddressing
+}
+
+// UpdateService parses a Kubernetes service and adds or updates it in the
+// ServiceCache. Returns the resulting ServiceID; if the service could not be
+// parsed, the returned ID is empty and the cache is left unchanged.
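+//
+// A minimal usage sketch (hypothetical caller): the StoppableWaitGroup lets
+// the caller track when the emitted event has been fully processed:
+//
+//	swg := lock.NewStoppableWaitGroup()
+//	id := cache.UpdateService(k8sSvc, swg)
+//	// consumer side:
+//	ev := <-cache.Events
+//	ev.SWG.Done() // acknowledge once the event has been plumbed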
+func (s *ServiceCache) UpdateService(k8sSvc *slim_corev1.Service, swg *lock.StoppableWaitGroup) ServiceID {
+	svcID, newService := ParseService(k8sSvc, s.nodeAddressing)
+	if newService == nil {
+		return svcID
+	}
+
+	for _, mutator := range s.ServiceMutators {
+		mutator(k8sSvc, newService)
+	}
+
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+
+	oldService, ok := s.services[svcID]
+	if ok {
+		if oldService.DeepEqual(newService) {
+			return svcID
+		}
+	}
+
+	s.services[svcID] = newService
+
+	// Check if the corresponding Endpoints resource is already available
+	endpoints, serviceReady := s.correlateEndpoints(svcID)
+	if serviceReady {
+		swg.Add()
+		s.Events <- ServiceEvent{
+			Action:       UpdateService,
+			ID:           svcID,
+			Service:      newService,
+			OldService:   oldService,
+			Endpoints:    endpoints,
+			OldEndpoints: endpoints,
+			SWG:          swg,
+		}
+	}
+
+	return svcID
+}
+
+func (s *ServiceCache) EnsureService(svcID ServiceID, swg *lock.StoppableWaitGroup) bool {
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+	if svc, found := s.services[svcID]; found {
+		if endpoints, serviceReady := s.correlateEndpoints(svcID); serviceReady {
+			swg.Add()
+			s.Events <- ServiceEvent{
+				Action:       UpdateService,
+				ID:           svcID,
+				Service:      svc,
+				OldService:   svc,
+				Endpoints:    endpoints,
+				OldEndpoints: endpoints,
+				SWG:          swg,
+			}
+			return true
+		}
+	}
+	return false
+}
+
+// DeleteService parses a Kubernetes service and removes it from the
+// ServiceCache
+func (s *ServiceCache) DeleteService(k8sSvc *slim_corev1.Service, swg *lock.StoppableWaitGroup) {
+	svcID := ParseServiceID(k8sSvc)
+
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+
+	oldService, serviceOK := s.services[svcID]
+	endpoints, _ := s.correlateEndpoints(svcID)
+	delete(s.services, svcID)
+
+	if serviceOK {
+		swg.Add()
+		s.Events <- ServiceEvent{
+			Action:    DeleteService,
+			ID:        svcID,
+			Service:   oldService,
+			Endpoints: endpoints,
+			SWG:       swg,
+		}
+	}
+}
+
+// LocalServices returns the list of known services that are not marked as
+// global (i.e., whose backends are all in the local cluster only).
+func (s *ServiceCache) LocalServices() sets.Set[ServiceID] {
+	ids := sets.New[ServiceID]()
+
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+
+	for id, svc := range s.services {
+		if !svc.IncludeExternal {
+			ids.Insert(id)
+		}
+	}
+
+	return ids
+}
+
+// UpdateEndpoints parses a Kubernetes endpoints and adds or updates it in the
+// ServiceCache. Returns the ServiceID and the endpoints correlated with the
+// service after the update (or the unchanged endpoints if nothing changed).
+func (s *ServiceCache) UpdateEndpoints(newEndpoints *Endpoints, swg *lock.StoppableWaitGroup) (ServiceID, *Endpoints) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+
+	esID := newEndpoints.EndpointSliceID
+
+	var oldEPs *Endpoints
+	eps, ok := s.endpoints[esID.ServiceID]
+	if ok {
+		oldEPs = eps.epSlices[esID.EndpointSliceName]
+		if oldEPs.DeepEqual(newEndpoints) {
+			return esID.ServiceID, newEndpoints
+		}
+	} else {
+		eps = newEndpointsSlices()
+		s.endpoints[esID.ServiceID] = eps
+	}
+
+	eps.Upsert(esID.EndpointSliceName, newEndpoints)
+
+	// Check if the corresponding Service resource is already available
+	svc, ok := s.services[esID.ServiceID]
+	endpoints, serviceReady := s.correlateEndpoints(esID.ServiceID)
+	if ok && serviceReady {
+		swg.Add()
+		s.Events <- ServiceEvent{
+			Action:       UpdateService,
+			ID:           esID.ServiceID,
+			Service:      svc,
+			Endpoints:    endpoints,
+			OldEndpoints: oldEPs,
+			SWG:          swg,
+		}
+	}
+
+	return esID.ServiceID, endpoints
+}
+
+// DeleteEndpoints parses a Kubernetes endpoints and removes it from the
+// ServiceCache
+func (s *ServiceCache) DeleteEndpoints(svcID EndpointSliceID, swg *lock.StoppableWaitGroup) ServiceID {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+
+	var oldEPs *Endpoints
+	svc, serviceOK := s.services[svcID.ServiceID]
+	eps, ok := s.endpoints[svcID.ServiceID]
+	if ok {
+		oldEPs = eps.epSlices[svcID.EndpointSliceName].DeepCopy() // copy for passing to ServiceEvent
+		isEmpty := eps.Delete(svcID.EndpointSliceName)
+		if isEmpty {
+			delete(s.endpoints, svcID.ServiceID)
+		}
+	}
+	endpoints, _ := s.correlateEndpoints(svcID.ServiceID)
+
+	if serviceOK {
+		swg.Add()
+		event := ServiceEvent{
+			Action:       UpdateService,
+			ID:           svcID.ServiceID,
+			Service:      svc,
+			Endpoints:    endpoints,
+			OldEndpoints: oldEPs,
+			SWG:          swg,
+		}
+
+		s.Events <- event
+	}
+
+	return svcID.ServiceID
+}
+
+// FrontendList is the list of all k8s service frontends
+type FrontendList map[string]struct{}
+
+// LooseMatch returns true if the provided frontend is found in the
+// FrontendList. If the frontend has a protocol value set, it only matches a
+// k8s service with a matching protocol. If no protocol is set, any k8s service
+// matching frontend IP and port is considered a match, regardless of protocol.
+func (l FrontendList) LooseMatch(frontend loadbalancer.L3n4Addr) (exists bool) {
+	switch frontend.Protocol {
+	case loadbalancer.NONE:
+		for _, protocol := range loadbalancer.AllProtocols {
+			frontend.Protocol = protocol
+			_, exists = l[frontend.StringWithProtocol()]
+			if exists {
+				return
+			}
+		}
+
+	// If the protocol is set, perform an exact match
+	default:
+		_, exists = l[frontend.StringWithProtocol()]
+	}
+	return
+}
+
+// UniqueServiceFrontends returns all externally scoped services known to
+// the service cache as a map, indexed by the string representation of a
+// loadbalancer.L3n4Addr. This helper is only used in unit tests.
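+//
+// Sketch of the intended use (hypothetical test code):
+//
+//	fes := cache.UniqueServiceFrontends()
+//	if fes.LooseMatch(frontend) {
+//		// the frontend is known to the cache
+//	}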
+func (s *ServiceCache) UniqueServiceFrontends() FrontendList {
+    uniqueFrontends := FrontendList{}
+
+    s.mutex.RLock()
+    defer s.mutex.RUnlock()
+
+    for _, svc := range s.services {
+        for _, feIP := range svc.FrontendIPs {
+            for _, p := range svc.Ports {
+                address := loadbalancer.L3n4Addr{
+                    AddrCluster: cmtypes.MustAddrClusterFromIP(feIP),
+                    L4Addr:      *p,
+                    Scope:       loadbalancer.ScopeExternal,
+                }
+                uniqueFrontends[address.StringWithProtocol()] = struct{}{}
+            }
+        }
+
+        for _, nodePortFEs := range svc.NodePorts {
+            for _, fe := range nodePortFEs {
+                if fe.Scope == loadbalancer.ScopeExternal {
+                    uniqueFrontends[fe.StringWithProtocol()] = struct{}{}
+                }
+            }
+        }
+    }
+
+    return uniqueFrontends
+}
+
+// filterEndpoints filters local endpoints by using k8s service heuristics.
+// For now it only implements the topology aware hints.
+func (s *ServiceCache) filterEndpoints(localEndpoints *Endpoints, svc *Service) *Endpoints {
+    if !s.config.EnableServiceTopology || svc == nil || !svc.TopologyAware {
+        return localEndpoints
+    }
+
+    if s.selfNodeZoneLabel == "" {
+        // The node doesn't have the zone label set, so we cannot filter
+        // endpoints by zone. Therefore, return all endpoints.
+        return localEndpoints
+    }
+
+    if svc.ExtTrafficPolicy == loadbalancer.SVCTrafficPolicyLocal || svc.IntTrafficPolicy == loadbalancer.SVCTrafficPolicyLocal {
+        // According to https://kubernetes.io/docs/concepts/services-networking/topology-aware-hints/#constraints:
+        // """
+        // Topology Aware Hints are not used when either externalTrafficPolicy or
+        // internalTrafficPolicy is set to Local on a Service.
+        // """
+        return localEndpoints
+    }
+
+    filteredEndpoints := &Endpoints{Backends: map[cmtypes.AddrCluster]*Backend{}}
+
+    for key, backend := range localEndpoints.Backends {
+        if len(backend.HintsForZones) == 0 {
+            return localEndpoints
+        }
+
+        for _, hint := range backend.HintsForZones {
+            if hint == s.selfNodeZoneLabel {
+                filteredEndpoints.Backends[key] = backend
+                break
+            }
+        }
+    }
+
+    if len(filteredEndpoints.Backends) == 0 {
+        // Fall back to all endpoints if none of them match the zone;
+        // otherwise the node would start dropping requests to the service.
+        return localEndpoints
+    }
+
+    return filteredEndpoints
+}
+
+// correlateEndpoints builds a combined Endpoints of the local endpoints and
+// all external endpoints if the service is marked as a global service. It
+// also returns a boolean that indicates whether the service is ready to be
+// plumbed. The boolean is true if either:
+//
+//   - a local endpoints resource is present, regardless of whether it
+//     contains any backends, or
+//   - remote endpoints exist which correlate to the service.
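+//
+// For example, a service whose local EndpointSlice exists but currently holds
+// zero backends is still reported as ready, while a service for which no
+// endpoints resource has been seen yet is not (unless remote endpoints of a
+// global service correlate to it).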
+func (s *ServiceCache) correlateEndpoints(id ServiceID) (*Endpoints, bool) {
+    endpoints := newEndpoints()
+
+    localEndpoints := s.endpoints[id].GetEndpoints()
+    svc, svcFound := s.services[id]
+
+    hasLocalEndpoints := localEndpoints != nil
+    if hasLocalEndpoints {
+        localEndpoints = s.filterEndpoints(localEndpoints, svc)
+
+        for ip, e := range localEndpoints.Backends {
+            e.Preferred = svcFound && svc.IncludeExternal && svc.ServiceAffinity == serviceAffinityLocal
+            endpoints.Backends[ip] = e.DeepCopy()
+        }
+    }
+
+    var hasExternalEndpoints bool
+    if svcFound && svc.IncludeExternal {
+        externalEndpoints, ok := s.externalEndpoints[id]
+        hasExternalEndpoints = ok && len(externalEndpoints.endpoints) > 0
+        if hasExternalEndpoints {
+            // Remote cluster endpoints already contain all Endpoints from all
+            // EndpointSlices, so there is no need to search the endpoints of a
+            // particular EndpointSlice.
+            for clusterName, remoteClusterEndpoints := range externalEndpoints.endpoints {
+                for ip, e := range remoteClusterEndpoints.Backends {
+                    if _, ok := endpoints.Backends[ip]; ok {
+                        log.WithFields(logrus.Fields{
+                            logfields.K8sSvcName:   id.Name,
+                            logfields.K8sNamespace: id.Namespace,
+                            logfields.IPAddr:       ip,
+                            "cluster":              clusterName,
+                        }).Warning("Conflicting service backend IP")
+                    } else {
+                        e.Preferred = svc.ServiceAffinity == serviceAffinityRemote
+                        endpoints.Backends[ip] = e.DeepCopy()
+                    }
+                }
+            }
+        }
+    }
+
+    // Report the service as ready if a local endpoints object exists or if
+    // external endpoints have been identified
+    return endpoints, hasLocalEndpoints || hasExternalEndpoints
+}
+
+// mergeExternalServiceOption is the type for the options to customize the behavior of external services merging.
+type mergeExternalServiceOption int
+
+const (
+    // optClusterAware enables the cluster aware handling for external services merging.
+    optClusterAware mergeExternalServiceOption = iota
+)
+
+// MergeExternalServiceUpdate merges a cluster service of a remote cluster into
+// the local service cache. The service endpoints are stored as external endpoints
+// and are correlated on demand with local services via correlateEndpoints().
+func (s *ServiceCache) MergeExternalServiceUpdate(service *serviceStore.ClusterService, swg *lock.StoppableWaitGroup) {
+    // Ignore updates of own cluster
+    if service.Cluster == option.Config.ClusterName {
+        return
+    }
+
+    s.mutex.Lock()
+    defer s.mutex.Unlock()
+
+    s.mergeServiceUpdateLocked(service, nil, swg)
+}
+
+func (s *ServiceCache) mergeServiceUpdateLocked(service *serviceStore.ClusterService,
+    oldService *Service, swg *lock.StoppableWaitGroup, opts ...mergeExternalServiceOption) {
+    scopedLog := log.WithFields(logrus.Fields{logfields.ServiceName: service.String()})
+
+    id := ServiceID{Name: service.Name, Namespace: service.Namespace}
+    if slices.Contains(opts, optClusterAware) {
+        id.Cluster = service.Cluster
+    }
+
+    externalEndpoints, ok := s.externalEndpoints[id]
+    if !ok {
+        externalEndpoints = newExternalEndpoints()
+        s.externalEndpoints[id] = externalEndpoints
+    }
+
+    oldEPs, _ := s.correlateEndpoints(id)
+
+    // The cluster the service belongs to matches the current one when dealing
+    // with external workloads (in which case all endpoints are always
+    // included), and differs in the cluster-mesh case (where remote endpoints
+    // are used only if the service is shared).
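+    //
+    // Summarized (a restatement of the branch below, not extra logic):
+    //
+    //	own cluster (external workloads)   -> merge backends
+    //	remote cluster, service shared     -> merge backends
+    //	remote cluster, service not shared -> drop that cluster's backends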
+ if service.Cluster != option.Config.ClusterName && !service.Shared { + delete(externalEndpoints.endpoints, service.Cluster) + } else { + scopedLog.Debugf("Updating backends to %+v", service.Backends) + backends := map[cmtypes.AddrCluster]*Backend{} + for ipString, portConfig := range service.Backends { + addr, err := cmtypes.ParseAddrCluster(ipString) + if err != nil { + scopedLog.WithField(logfields.IPAddr, ipString). + Error("Skipping service backend due to invalid IP address") + continue + } + + backends[addr] = &Backend{Ports: portConfig} + } + externalEndpoints.endpoints[service.Cluster] = &Endpoints{ + Backends: backends, + } + } + + svc, ok := s.services[id] + + endpoints, serviceReady := s.correlateEndpoints(id) + + // Only send event notification if service is ready. + if ok && serviceReady { + swg.Add() + s.Events <- ServiceEvent{ + Action: UpdateService, + ID: id, + Service: svc, + OldService: oldService, + Endpoints: endpoints, + OldEndpoints: oldEPs, + SWG: swg, + } + } +} + +// MergeExternalServiceDelete merges the deletion of a cluster service in a +// remote cluster into the local service cache. The service endpoints are +// stored as external endpoints and are correlated on demand with local +// services via correlateEndpoints(). +func (s *ServiceCache) MergeExternalServiceDelete(service *serviceStore.ClusterService, swg *lock.StoppableWaitGroup) { + // Ignore updates of own cluster + if service.Cluster == option.Config.ClusterName { + return + } + + s.mutex.Lock() + defer s.mutex.Unlock() + + id := ServiceID{Cluster: service.Cluster, Name: service.Name, Namespace: service.Namespace} + var opts []mergeExternalServiceOption + if _, clusterAware := s.services[id]; clusterAware { + opts = append(opts, optClusterAware) + } + + s.mergeExternalServiceDeleteLocked(service, swg, opts...) +} + +func (s *ServiceCache) mergeExternalServiceDeleteLocked(service *serviceStore.ClusterService, swg *lock.StoppableWaitGroup, opts ...mergeExternalServiceOption) { + scopedLog := log.WithFields(logrus.Fields{logfields.ServiceName: service.String()}) + + id := ServiceID{Name: service.Name, Namespace: service.Namespace} + if slices.Contains(opts, optClusterAware) { + id.Cluster = service.Cluster + } + + externalEndpoints, ok := s.externalEndpoints[id] + if ok { + scopedLog.Debug("Deleting external endpoints") + + oldEPs, _ := s.correlateEndpoints(id) + + delete(externalEndpoints.endpoints, service.Cluster) + if len(externalEndpoints.endpoints) == 0 { + delete(s.externalEndpoints, id) + } + + svc, ok := s.services[id] + + endpoints, serviceReady := s.correlateEndpoints(id) + + // Only send event notification if service is shared. + if ok && svc.Shared { + swg.Add() + event := ServiceEvent{ + Action: UpdateService, + ID: id, + Service: svc, + Endpoints: endpoints, + OldEndpoints: oldEPs, + SWG: swg, + } + + if !serviceReady { + delete(s.services, id) + event.Action = DeleteService + } + + s.Events <- event + } + } else { + scopedLog.Debug("Received delete event for non-existing endpoints") + } +} + +// MergeClusterServiceUpdate merges a cluster service of a local cluster into +// the local service cache. The service endpoints are stored as external endpoints +// and are correlated on demand with local services via correlateEndpoints(). +// Local service is created and/or updated if needed. 
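+//
+// A hedged caller sketch (clusterService and swg are assumed to come from a
+// cluster-mesh service store observer; names are illustrative):
+//
+//	cache.MergeClusterServiceUpdate(clusterService, swg)
+//	// an UpdateService event is emitted once the service correlates with at
+//	// least one endpoints source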
+func (s *ServiceCache) MergeClusterServiceUpdate(service *serviceStore.ClusterService, swg *lock.StoppableWaitGroup) { + scopedLog := log.WithFields(logrus.Fields{logfields.ServiceName: service.String()}) + id := ServiceID{Name: service.Name, Namespace: service.Namespace} + + s.mutex.Lock() + defer s.mutex.Unlock() + + var oldService *Service + svc, ok := s.services[id] + if !ok || !svc.EqualsClusterService(service) { + oldService = svc + svc = ParseClusterService(service) + s.services[id] = svc + scopedLog.Debugf("Added new service %v", svc) + } + s.mergeServiceUpdateLocked(service, oldService, swg) +} + +// MergeClusterServiceDelete merges the deletion of a cluster service in a +// remote cluster into the local service cache, deleting the local service. +func (s *ServiceCache) MergeClusterServiceDelete(service *serviceStore.ClusterService, swg *lock.StoppableWaitGroup) { + scopedLog := log.WithFields(logrus.Fields{logfields.ServiceName: service.String()}) + id := ServiceID{Name: service.Name, Namespace: service.Namespace} + + s.mutex.Lock() + defer s.mutex.Unlock() + + externalEndpoints, ok := s.externalEndpoints[id] + if ok { + scopedLog.Debug("Deleting cluster endpoints") + delete(externalEndpoints.endpoints, service.Cluster) + if len(externalEndpoints.endpoints) == 0 { + delete(s.externalEndpoints, id) + } + } + + svc, ok := s.services[id] + endpoints, _ := s.correlateEndpoints(id) + delete(s.services, id) + + if ok { + swg.Add() + s.Events <- ServiceEvent{ + Action: DeleteService, + ID: id, + Service: svc, + Endpoints: endpoints, + SWG: swg, + } + } +} + +// DebugStatus implements debug.StatusObject to provide debug status collection +// ability +func (s *ServiceCache) DebugStatus() string { + s.mutex.RLock() + str := spew.Sdump(s) + s.mutex.RUnlock() + return str +} + +func (s *ServiceCache) updateSelfNodeLabels(labels map[string]string) { + s.mutex.Lock() + defer s.mutex.Unlock() + + zone := labels[core_v1.LabelTopologyZone] + + if s.selfNodeZoneLabel == zone { + return + } + + s.selfNodeZoneLabel = zone + + for id, svc := range s.services { + if !svc.TopologyAware { + continue + } + + if endpoints, ready := s.correlateEndpoints(id); ready { + swg := lock.NewStoppableWaitGroup() + swg.Add() + s.Events <- ServiceEvent{ + Action: UpdateService, + ID: id, + Service: svc, + OldService: svc, + Endpoints: endpoints, + OldEndpoints: endpoints, + SWG: swg, + } + } + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/doc.go new file mode 100644 index 0000000000..6474070a36 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/doc.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +deepequal-gen=package +// +groupName=discovery.k8s.io + +// Package v1 contains slimmer versions of k8s discovery types. +package v1 diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.pb.go new file mode 100644 index 0000000000..13b934cc88 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.pb.go @@ -0,0 +1,2092 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by protoc-gen-gogo. 
DO NOT EDIT. +// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_824daf76e2aebd1d, []int{0} +} +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(m, src) +} +func (m *Endpoint) XXX_Size() int { + return m.Size() +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *EndpointConditions) Reset() { *m = EndpointConditions{} } +func (*EndpointConditions) ProtoMessage() {} +func (*EndpointConditions) Descriptor() ([]byte, []int) { + return fileDescriptor_824daf76e2aebd1d, []int{1} +} +func (m *EndpointConditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointConditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointConditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointConditions.Merge(m, src) +} +func (m *EndpointConditions) XXX_Size() int { + return m.Size() +} +func (m *EndpointConditions) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointConditions.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo + +func (m *EndpointHints) Reset() { *m = EndpointHints{} } +func (*EndpointHints) ProtoMessage() {} +func (*EndpointHints) Descriptor() ([]byte, []int) { + return fileDescriptor_824daf76e2aebd1d, []int{2} +} +func (m *EndpointHints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointHints) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointHints.Merge(m, src) +} +func (m *EndpointHints) XXX_Size() int { + return m.Size() +} +func (m *EndpointHints) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointHints.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointHints proto.InternalMessageInfo + +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func 
(*EndpointPort) Descriptor() ([]byte, []int) { + return fileDescriptor_824daf76e2aebd1d, []int{3} +} +func (m *EndpointPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointPort.Merge(m, src) +} +func (m *EndpointPort) XXX_Size() int { + return m.Size() +} +func (m *EndpointPort) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointPort.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointPort proto.InternalMessageInfo + +func (m *EndpointSlice) Reset() { *m = EndpointSlice{} } +func (*EndpointSlice) ProtoMessage() {} +func (*EndpointSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_824daf76e2aebd1d, []int{4} +} +func (m *EndpointSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSlice.Merge(m, src) +} +func (m *EndpointSlice) XXX_Size() int { + return m.Size() +} +func (m *EndpointSlice) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo + +func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} } +func (*EndpointSliceList) ProtoMessage() {} +func (*EndpointSliceList) Descriptor() ([]byte, []int) { + return fileDescriptor_824daf76e2aebd1d, []int{5} +} +func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSliceList.Merge(m, src) +} +func (m *EndpointSliceList) XXX_Size() int { + return m.Size() +} +func (m *EndpointSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo + +func (m *ForZone) Reset() { *m = ForZone{} } +func (*ForZone) ProtoMessage() {} +func (*ForZone) Descriptor() ([]byte, []int) { + return fileDescriptor_824daf76e2aebd1d, []int{6} +} +func (m *ForZone) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ForZone) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ForZone) XXX_Merge(src proto.Message) { + xxx_messageInfo_ForZone.Merge(m, src) +} +func (m *ForZone) XXX_Size() int { + return m.Size() +} +func (m *ForZone) XXX_DiscardUnknown() { + xxx_messageInfo_ForZone.DiscardUnknown(m) +} + +var xxx_messageInfo_ForZone proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Endpoint)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.Endpoint") + proto.RegisterMapType((map[string]string)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.Endpoint.DeprecatedTopologyEntry") + proto.RegisterType((*EndpointConditions)(nil), 
"github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointConditions") + proto.RegisterType((*EndpointHints)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointHints") + proto.RegisterType((*EndpointPort)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointPort") + proto.RegisterType((*EndpointSlice)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointSlice") + proto.RegisterType((*EndpointSliceList)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointSliceList") + proto.RegisterType((*ForZone)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.ForZone") +} + +func init() { + proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.proto", fileDescriptor_824daf76e2aebd1d) +} + +var fileDescriptor_824daf76e2aebd1d = []byte{ + // 847 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4d, 0x6f, 0xe4, 0x34, + 0x18, 0x1e, 0xef, 0x34, 0x34, 0xe3, 0xb4, 0x62, 0xd7, 0x42, 0x22, 0xaa, 0x50, 0x32, 0x1a, 0x09, + 0x69, 0xa4, 0x15, 0x89, 0xda, 0x03, 0xaa, 0x7a, 0x40, 0x6c, 0xb6, 0x5d, 0xed, 0x22, 0x28, 0x2b, + 0x6f, 0x4f, 0x2b, 0x0e, 0xa4, 0x89, 0x37, 0x75, 0x67, 0x62, 0x87, 0xd8, 0x33, 0xd2, 0x70, 0x42, + 0xfb, 0x07, 0xe0, 0x67, 0x70, 0xe3, 0xc6, 0x6f, 0xe8, 0x71, 0x8f, 0x7b, 0x8a, 0x68, 0xf8, 0x09, + 0xdc, 0x7a, 0x42, 0x76, 0xbe, 0xa6, 0x4c, 0x11, 0xea, 0x30, 0xa7, 0xd8, 0x8f, 0x5f, 0x3f, 0xef, + 0xf3, 0x3e, 0xaf, 0x6d, 0x05, 0x7e, 0x95, 0x50, 0x79, 0x31, 0x3b, 0xf7, 0x22, 0x9e, 0xfa, 0x11, + 0x9d, 0xd2, 0x59, 0xfb, 0xc9, 0x26, 0x89, 0x3f, 0x39, 0x14, 0xbe, 0x98, 0xd2, 0x54, 0x0f, 0xc2, + 0x8c, 0xfa, 0x31, 0x15, 0x11, 0x9f, 0x93, 0x7c, 0xe1, 0xcf, 0xf7, 0xfd, 0x84, 0x30, 0x92, 0x87, + 0x92, 0xc4, 0x5e, 0x96, 0x73, 0xc9, 0xd1, 0x51, 0xc7, 0xe5, 0x55, 0x24, 0xcd, 0x27, 0x9b, 0x24, + 0xde, 0xe4, 0x50, 0x78, 0x8a, 0x4b, 0x0f, 0xc2, 0x8c, 0x7a, 0x2d, 0x97, 0x37, 0xdf, 0xdf, 0x3b, + 0xb9, 0x9f, 0x8e, 0x88, 0xe7, 0xe4, 0x0e, 0x09, 0x7b, 0xcf, 0xee, 0x45, 0x23, 0xfc, 0x94, 0xc8, + 0xf0, 0x2e, 0x9e, 0xcf, 0x96, 0x78, 0x12, 0x9e, 0x70, 0x5f, 0xc3, 0xe7, 0xb3, 0x37, 0x7a, 0xa6, + 0x27, 0x7a, 0x54, 0x87, 0x2b, 0x42, 0x8f, 0x72, 0xc5, 0x99, 0x86, 0xd1, 0x05, 0x65, 0xca, 0x21, + 0x95, 0x31, 0x9f, 0x31, 0x49, 0x53, 0xb2, 0xc2, 0xff, 0xf9, 0x7f, 0x6d, 0x10, 0xd1, 0x05, 0x49, + 0xc3, 0x7f, 0xee, 0x1b, 0x95, 0x5b, 0xd0, 0x3c, 0x61, 0x71, 0xc6, 0x29, 0x93, 0xe8, 0x31, 0x1c, + 0x84, 0x71, 0x9c, 0x13, 0x21, 0x88, 0xb0, 0xc1, 0xb0, 0x3f, 0x1e, 0x04, 0xbb, 0x65, 0xe1, 0x0e, + 0x9e, 0x34, 0x20, 0xee, 0xd6, 0xd1, 0x5b, 0x00, 0x61, 0xc4, 0x59, 0x4c, 0x25, 0xe5, 0x4c, 0xd8, + 0x0f, 0x86, 0x60, 0x6c, 0x1d, 0x9c, 0x7a, 0xeb, 0xb7, 0xcc, 0x6b, 0x74, 0x3c, 0x6d, 0x59, 0x03, + 0x74, 0x55, 0xb8, 0xbd, 0xb2, 0x70, 0x61, 0x87, 0xe1, 0xa5, 0xac, 0xe8, 0x57, 0x00, 0x51, 0x4c, + 0xb2, 0x9c, 0x44, 0xaa, 0xa6, 0x33, 0x9e, 0xf1, 0x29, 0x4f, 0x16, 0xb6, 0x31, 0xec, 0x8f, 0xad, + 0x83, 0xef, 0x36, 0x21, 0xc6, 0x3b, 0x5e, 0xa1, 0x3f, 0x61, 0x32, 0x5f, 0x04, 0x7b, 0xb5, 0x34, + 0xb4, 0x1a, 0x80, 0xef, 0xd0, 0x84, 0xc6, 0xd0, 0x64, 0x3c, 0x26, 0xa7, 0x61, 0x4a, 0xec, 0x0f, + 0x86, 0x60, 0x3c, 0x08, 0x76, 0xca, 0xc2, 0x35, 0x4f, 0x6b, 0x0c, 0xb7, 0xab, 0xe8, 0x13, 0xb8, + 0xf5, 0x23, 0x67, 0xc4, 0xde, 0xd6, 0x51, 0x66, 0x59, 0xb8, 0x5b, 0xaf, 0x39, 0x23, 0x58, 0xa3, + 0xe8, 0x12, 0x1a, 0x17, 0x94, 0x49, 0x61, 0x9b, 0xda, 0xf1, 0x17, 0x9b, 0x28, 0xf2, 0xb9, 0x22, + 0x0c, 0x06, 0x65, 
0xe1, 0x1a, 0x7a, 0x88, 0xab, 0x14, 0x7b, 0x27, 0xf0, 0xe3, 0x7f, 0x29, 0x1f, + 0x3d, 0x84, 0xfd, 0x09, 0x59, 0xd8, 0x40, 0x69, 0xc4, 0x6a, 0x88, 0x3e, 0x82, 0xc6, 0x3c, 0x9c, + 0xce, 0x88, 0x3e, 0x0a, 0x03, 0x5c, 0x4d, 0x8e, 0x1e, 0x1c, 0x82, 0xd1, 0xcf, 0x00, 0xa2, 0xd5, + 0xe6, 0x22, 0x17, 0x1a, 0x39, 0x09, 0xe3, 0x8a, 0xc4, 0xac, 0xd2, 0x63, 0x05, 0xe0, 0x0a, 0x47, + 0x9f, 0xc2, 0x6d, 0x41, 0xf2, 0x39, 0x65, 0x89, 0xe6, 0x34, 0x03, 0xab, 0x2c, 0xdc, 0xed, 0x57, + 0x15, 0x84, 0x9b, 0x35, 0xb4, 0x0f, 0x2d, 0x49, 0xf2, 0x94, 0xb2, 0x50, 0xaa, 0xd0, 0xbe, 0x0e, + 0xfd, 0xb0, 0x2c, 0x5c, 0xeb, 0xac, 0x83, 0xf1, 0x72, 0xcc, 0xe8, 0x2d, 0x80, 0xbb, 0xb7, 0x8a, + 0x47, 0x3f, 0x40, 0xf3, 0x0d, 0xcf, 0x95, 0xcf, 0xd5, 0xd1, 0xb7, 0x0e, 0x9e, 0xfe, 0x1f, 0x67, + 0x9f, 0x55, 0x5c, 0xc1, 0xc3, 0xfa, 0x94, 0x98, 0x35, 0x20, 0x70, 0x9b, 0x66, 0xf4, 0x3b, 0x80, + 0x3b, 0x8d, 0x88, 0x97, 0x3c, 0x97, 0xaa, 0xf1, 0x4c, 0x1d, 0x0f, 0xd0, 0x35, 0x5e, 0x1f, 0x0d, + 0x8d, 0xa2, 0x4b, 0x68, 0xea, 0x3b, 0x1b, 0xf1, 0x69, 0x65, 0x71, 0x70, 0xaa, 0x88, 0x5f, 0xd6, + 0xd8, 0x4d, 0xe1, 0x7e, 0xb1, 0xd6, 0x9b, 0xe7, 0x35, 0x0c, 0xb8, 0xe5, 0x57, 0x4a, 0x32, 0x9e, + 0x4b, 0xed, 0xa5, 0x51, 0x29, 0x51, 0x0a, 0xb1, 0x46, 0x47, 0xbf, 0xf5, 0x3b, 0xf7, 0x5e, 0x4d, + 0x69, 0x44, 0x50, 0x0e, 0x4d, 0xf5, 0xf2, 0xc5, 0xa1, 0x0c, 0xb5, 0x7a, 0xeb, 0x20, 0xb8, 0x9f, + 0x7b, 0xc2, 0x53, 0xfb, 0x95, 0x73, 0xdf, 0x9e, 0x5f, 0x92, 0x48, 0x7e, 0x43, 0x64, 0xd8, 0xdd, + 0xfe, 0x0e, 0xc3, 0x6d, 0x1e, 0x74, 0x0c, 0xad, 0xfa, 0x35, 0x3a, 0x5b, 0x64, 0xc4, 0xde, 0xd2, + 0x96, 0x8c, 0xea, 0x2d, 0xd6, 0x93, 0x6e, 0xe9, 0xe6, 0xf6, 0x14, 0x2f, 0x6f, 0x43, 0x33, 0x38, + 0x20, 0x75, 0x29, 0xea, 0x11, 0x53, 0x8d, 0x3f, 0xde, 0xc4, 0x95, 0x0a, 0x1e, 0xd5, 0x4a, 0x06, + 0x0d, 0x22, 0x70, 0x97, 0x09, 0xa5, 0xd0, 0x50, 0x56, 0x0a, 0xbb, 0xaf, 0x53, 0x3e, 0xdf, 0x44, + 0x4a, 0xd5, 0xa1, 0x60, 0xb7, 0x4e, 0x6b, 0xa8, 0x99, 0xc0, 0x55, 0x96, 0xd1, 0x5f, 0x00, 0x3e, + 0xba, 0xd5, 0xb1, 0xaf, 0xa9, 0x90, 0x88, 0xad, 0x74, 0xed, 0xcb, 0x75, 0xbb, 0xa6, 0xf8, 0x74, + 0xcf, 0xda, 0x03, 0xdf, 0x20, 0x4b, 0x1d, 0x63, 0xd0, 0xa0, 0x92, 0xa4, 0x8d, 0xcf, 0x1b, 0x79, + 0xba, 0x74, 0x35, 0x5d, 0xd5, 0x2f, 0x14, 0x3f, 0xae, 0xd2, 0x8c, 0x1e, 0xc3, 0xed, 0xfa, 0xda, + 0xa1, 0xe1, 0xad, 0xab, 0xb5, 0x53, 0x87, 0x2f, 0x5d, 0xaf, 0xe0, 0xfb, 0xab, 0x6b, 0xa7, 0xf7, + 0xee, 0xda, 0xe9, 0xbd, 0xbf, 0x76, 0x7a, 0x3f, 0x95, 0x0e, 0xb8, 0x2a, 0x1d, 0xf0, 0xae, 0x74, + 0xc0, 0xfb, 0xd2, 0x01, 0x7f, 0x94, 0x0e, 0xf8, 0xe5, 0x4f, 0xa7, 0xf7, 0xfa, 0x68, 0xfd, 0xdf, + 0x9b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x58, 0x2a, 0x4a, 0x0d, 0x1b, 0x09, 0x00, 0x00, +} + +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Hints != nil { + { + size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Zone != nil { + i -= len(*m.Zone) + copy(dAtA[i:], *m.Zone) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Zone))) + i-- + dAtA[i] = 0x3a + } + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x32 + } + if len(m.DeprecatedTopology) > 0 { + keysForDeprecatedTopology := make([]string, 0, len(m.DeprecatedTopology)) + for k := range m.DeprecatedTopology { + keysForDeprecatedTopology = append(keysForDeprecatedTopology, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDeprecatedTopology) + for iNdEx := len(keysForDeprecatedTopology) - 1; iNdEx >= 0; iNdEx-- { + v := m.DeprecatedTopology[string(keysForDeprecatedTopology[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForDeprecatedTopology[iNdEx]) + copy(dAtA[i:], keysForDeprecatedTopology[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDeprecatedTopology[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + { + size, err := m.Conditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addresses[iNdEx]) + copy(dAtA[i:], m.Addresses[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EndpointConditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointConditions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Terminating != nil { + i-- + if *m.Terminating { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Serving != nil { + i-- + if *m.Serving { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Ready != nil { + i-- + if *m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EndpointHints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointHints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ForZones) > 0 { + for iNdEx := len(m.ForZones) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ForZones[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + 
var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x18 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0x12 + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.AddressType) + copy(dAtA[i:], m.AddressType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AddressType))) + i-- + dAtA[i] = 0x22 + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndpointSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ForZone) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ForZone) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ForZone) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v 
uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Endpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Conditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.DeprecatedTopology) > 0 { + for k, v := range m.DeprecatedTopology { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Zone != nil { + l = len(*m.Zone) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Hints != nil { + l = m.Hints.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointConditions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ready != nil { + n += 2 + } + if m.Serving != nil { + n += 2 + } + if m.Terminating != nil { + n += 2 + } + return n +} + +func (m *EndpointHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ForZones) > 0 { + for _, e := range m.ForZones { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EndpointPort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *EndpointSlice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AddressType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EndpointSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ForZone) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Endpoint) String() string { + if this == nil { + return "nil" + } + keysForDeprecatedTopology := make([]string, 0, len(this.DeprecatedTopology)) + for k := range this.DeprecatedTopology { + keysForDeprecatedTopology = append(keysForDeprecatedTopology, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDeprecatedTopology) + mapStringForDeprecatedTopology := "map[string]string{" + for _, k := range keysForDeprecatedTopology { + mapStringForDeprecatedTopology += fmt.Sprintf("%v: %v,", k, this.DeprecatedTopology[k]) + } + mapStringForDeprecatedTopology += "}" + s := strings.Join([]string{`&Endpoint{`, 
+ `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `Conditions:` + strings.Replace(strings.Replace(this.Conditions.String(), "EndpointConditions", "EndpointConditions", 1), `&`, ``, 1) + `,`, + `DeprecatedTopology:` + mapStringForDeprecatedTopology + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `Zone:` + valueToStringGenerated(this.Zone) + `,`, + `Hints:` + strings.Replace(this.Hints.String(), "EndpointHints", "EndpointHints", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointConditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointConditions{`, + `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `Serving:` + valueToStringGenerated(this.Serving) + `,`, + `Terminating:` + valueToStringGenerated(this.Terminating) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointHints) String() string { + if this == nil { + return "nil" + } + repeatedStringForForZones := "[]ForZone{" + for _, f := range this.ForZones { + repeatedStringForForZones += strings.Replace(strings.Replace(f.String(), "ForZone", "ForZone", 1), `&`, ``, 1) + "," + } + repeatedStringForForZones += "}" + s := strings.Join([]string{`&EndpointHints{`, + `ForZones:` + repeatedStringForForZones + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + valueToStringGenerated(this.Name) + `,`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSlice) String() string { + if this == nil { + return "nil" + } + repeatedStringForEndpoints := "[]Endpoint{" + for _, f := range this.Endpoints { + repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + "," + } + repeatedStringForEndpoints += "}" + repeatedStringForPorts := "[]EndpointPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&EndpointSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Endpoints:` + repeatedStringForEndpoints + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `AddressType:` + fmt.Sprintf("%v", this.AddressType) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSliceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]EndpointSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EndpointSlice", "EndpointSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EndpointSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ForZone) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ForZone{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) 
+} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Conditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedTopology", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeprecatedTopology == nil { + m.DeprecatedTopology = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.DeprecatedTopology[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Zone = &s + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hints == nil { + m.Hints = &EndpointHints{} + } + if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) 
+ if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointConditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointConditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointConditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Ready = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serving", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Serving = &b + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Terminating = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointHints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointHints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointHints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForZones", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ForZones = append(m.ForZones, ForZone{}) + if err := m.ForZones[len(m.ForZones)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoints = append(m.Endpoints, Endpoint{}) + if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddressType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AddressType = 
AddressType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, EndpointSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ForZone) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ForZone: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ForZone: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.proto new file mode 100644 index 0000000000..d893485a0b --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.proto @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = "proto2"; + +package github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1; + +import "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto"; +import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1"; + +// Endpoint represents a single logical "backend" implementing a service. +message Endpoint { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. Consumers + // must handle different types of addresses in the context of their own + // capabilities. This must contain at least one address but no more than + // 100. These are all assumed to be fungible and clients may choose to only + // use the first element. Refer to: https://issue.k8s.io/106267 + // +listType=set + repeated string addresses = 1; + + // conditions contains information about the current status of the endpoint. + optional EndpointConditions conditions = 2; + + // deprecatedTopology contains topology information part of the v1beta1 + // API. This field is deprecated, and will be removed when the v1beta1 + // API is removed (no sooner than kubernetes v1.24). While this field can + // hold values, it is not writable through the v1 API, and any attempts to + // write to it will be silently ignored. Topology information can be found + // in the zone and nodeName fields instead. + // +optional + map deprecatedTopology = 5; + + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. + // +optional + optional string nodeName = 6; + + // zone is the name of the Zone this endpoint exists in. + // +optional + optional string zone = 7; + + // hints contains information associated with how an endpoint should be + // consumed. + // +optional + optional EndpointHints hints = 8; +} + +// EndpointConditions represents the current condition of an endpoint. +message EndpointConditions { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints, except when the normal readiness + // behavior is being explicitly overridden, for example when the associated + // Service has set the publishNotReadyAddresses flag. + // +optional + optional bool ready = 1; + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. + // +optional + optional bool serving = 2; + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. + // +optional + optional bool terminating = 3; +} + +// EndpointHints provides hints describing how an endpoint should be consumed. 
+message EndpointHints { + // forZones indicates the zone(s) this endpoint should be consumed by to + // enable topology aware routing. + // +listType=atomic + repeated ForZone forZones = 1; +} + +// EndpointPort represents a Port used by an EndpointSlice +// +structType=atomic +message EndpointPort { + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. + // Name must either be an empty string or pass DNS_LABEL validation: + // * must be no more than 63 characters long. + // * must consist of lower case alphanumeric characters or '-'. + // * must start and end with an alphanumeric character. + // Default is empty string. + optional string name = 1; + + // protocol represents the IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + optional string protocol = 2; + + // port represents the port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + optional int32 port = 3; +} + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +message EndpointSlice { + // Standard object's metadata. + // +optional + optional github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; + + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. This field is + // immutable after creation. The following address types are currently + // supported: + // * IPv4: Represents an IPv4 Address. + // * IPv6: Represents an IPv6 Address. + // * FQDN: Represents a Fully Qualified Domain Name. + optional string addressType = 4; + + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + repeated Endpoint endpoints = 2; + + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + repeated EndpointPort ports = 3; +} + +// EndpointSliceList represents a list of endpoint slices +message EndpointSliceList { + // Standard list metadata. + // +optional + optional github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of endpoint slices + repeated EndpointSlice items = 2; +} + +// ForZone provides information about which zones should consume this endpoint. +message ForZone { + // name represents the name of the zone. + optional string name = 1; +} + diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/register.go new file mode 100644 index 0000000000..228e1cacfa --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/register.go @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2019 The Kubernetes Authors. 
+ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "discovery.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &EndpointSlice{}, + &EndpointSliceList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/types.go new file mode 100644 index 0000000000..51a23485e2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/types.go @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2019 The Kubernetes Authors. + +package v1 + +import ( + slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +type EndpointSlice struct { + slim_metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + slim_metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. This field is + // immutable after creation. The following address types are currently + // supported: + // * IPv4: Represents an IPv4 Address. + // * IPv6: Represents an IPv6 Address. + // * FQDN: Represents a Fully Qualified Domain Name. + AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". 
Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"` +} + +// AddressType represents the type of address referred to by an endpoint. +// +enum +type AddressType string + +const ( + // AddressTypeIPv4 represents an IPv4 Address. + AddressTypeIPv4 = AddressType(slim_corev1.IPv4Protocol) + + // AddressTypeIPv6 represents an IPv6 Address. + AddressTypeIPv6 = AddressType(slim_corev1.IPv6Protocol) + + // AddressTypeFQDN represents a FQDN. + AddressTypeFQDN = AddressType("FQDN") +) + +// Endpoint represents a single logical "backend" implementing a service. +type Endpoint struct { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. Consumers + // must handle different types of addresses in the context of their own + // capabilities. This must contain at least one address but no more than + // 100. These are all assumed to be fungible and clients may choose to only + // use the first element. Refer to: https://issue.k8s.io/106267 + // +listType=set + Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + + // conditions contains information about the current status of the endpoint. + Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + + // deprecatedTopology contains topology information part of the v1beta1 + // API. This field is deprecated, and will be removed when the v1beta1 + // API is removed (no sooner than kubernetes v1.24). While this field can + // hold values, it is not writable through the v1 API, and any attempts to + // write to it will be silently ignored. Topology information can be found + // in the zone and nodeName fields instead. + // +optional + DeprecatedTopology map[string]string `json:"deprecatedTopology,omitempty" protobuf:"bytes,5,opt,name=deprecatedTopology"` + + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. + // +optional + NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` + + // zone is the name of the Zone this endpoint exists in. + // +optional + Zone *string `json:"zone,omitempty" protobuf:"bytes,7,opt,name=zone"` + + // hints contains information associated with how an endpoint should be + // consumed. + // +optional + Hints *EndpointHints `json:"hints,omitempty" protobuf:"bytes,8,opt,name=hints"` +} + +// EndpointConditions represents the current condition of an endpoint. +type EndpointConditions struct { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints, except when the normal readiness + // behavior is being explicitly overridden, for example when the associated + // Service has set the publishNotReadyAddresses flag. + // +optional + Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. 
+ // +optional + Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"` + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. + // +optional + Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"` +} + +// EndpointHints provides hints describing how an endpoint should be consumed. +type EndpointHints struct { + // forZones indicates the zone(s) this endpoint should be consumed by to + // enable topology aware routing. + // +listType=atomic + ForZones []ForZone `json:"forZones,omitempty" protobuf:"bytes,1,name=forZones"` +} + +// ForZone provides information about which zones should consume this endpoint. +type ForZone struct { + // name represents the name of the zone. + Name string `json:"name" protobuf:"bytes,1,name=name"` +} + +// EndpointPort represents a Port used by an EndpointSlice +// +structType=atomic +type EndpointPort struct { + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. + // Name must either be an empty string or pass DNS_LABEL validation: + // * must be no more than 63 characters long. + // * must consist of lower case alphanumeric characters or '-'. + // * must start and end with an alphanumeric character. + // Default is empty string. + Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` + + // protocol represents the IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + Protocol *slim_corev1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` + + // port represents the port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSliceList represents a list of endpoint slices +type EndpointSliceList struct { + slim_metav1.TypeMeta `json:",inline"` + + // Standard list metadata. + // +optional + slim_metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of endpoint slices + Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/well_known_labels.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/well_known_labels.go new file mode 100644 index 0000000000..e2b671a3a3 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/well_known_labels.go @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2019 The Kubernetes Authors. + +package v1 + +const ( + // LabelServiceName is used to indicate the name of a Kubernetes service. + LabelServiceName = "kubernetes.io/service-name" + // LabelManagedBy is used to indicate the controller or entity that manages + // an EndpointSlice. This label aims to enable different EndpointSlice + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // EndpointSlices. 
+ LabelManagedBy = "endpointslice.kubernetes.io/managed-by" + // LabelSkipMirror can be set to true on an Endpoints resource to indicate + // that the EndpointSliceMirroring controller should not mirror this + // resource with EndpointSlices. + LabelSkipMirror = "endpointslice.kubernetes.io/skip-mirror" +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..84538aa06b --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepcopy.go @@ -0,0 +1,230 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Conditions.DeepCopyInto(&out.Conditions) + if in.DeprecatedTopology != nil { + in, out := &in.DeprecatedTopology, &out.DeprecatedTopology + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } + if in.Hints != nil { + in, out := &in.Hints, &out.Hints + *out = new(EndpointHints) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { + *out = *in + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = new(bool) + **out = **in + } + if in.Serving != nil { + in, out := &in.Serving, &out.Serving + *out = new(bool) + **out = **in + } + if in.Terminating != nil { + in, out := &in.Terminating, &out.Terminating + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions. +func (in *EndpointConditions) DeepCopy() *EndpointConditions { + if in == nil { + return nil + } + out := new(EndpointConditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointHints) DeepCopyInto(out *EndpointHints) { + *out = *in + if in.ForZones != nil { + in, out := &in.ForZones, &out.ForZones + *out = make([]ForZone, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointHints. 
+func (in *EndpointHints) DeepCopy() *EndpointHints { + if in == nil { + return nil + } + out := new(EndpointHints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(corev1.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. +func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice. +func (in *EndpointSlice) DeepCopy() *EndpointSlice { + if in == nil { + return nil + } + out := new(EndpointSlice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSlice) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EndpointSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList. +func (in *EndpointSliceList) DeepCopy() *EndpointSliceList { + if in == nil { + return nil + } + out := new(EndpointSliceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSliceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForZone) DeepCopyInto(out *ForZone) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForZone. 
+func (in *ForZone) DeepCopy() *ForZone { + if in == nil { + return nil + } + out := new(ForZone) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepequal.go new file mode 100644 index 0000000000..5a03388558 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepequal.go @@ -0,0 +1,284 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepequal-gen. DO NOT EDIT. + +package v1 + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *Endpoint) DeepEqual(other *Endpoint) bool { + if other == nil { + return false + } + + if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) { + in, other := &in.Addresses, &other.Addresses + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + if !in.Conditions.DeepEqual(&other.Conditions) { + return false + } + + if ((in.DeprecatedTopology != nil) && (other.DeprecatedTopology != nil)) || ((in.DeprecatedTopology == nil) != (other.DeprecatedTopology == nil)) { + in, other := &in.DeprecatedTopology, &other.DeprecatedTopology + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if inValue != otherValue { + return false + } + } + } + } + } + + if (in.NodeName == nil) != (other.NodeName == nil) { + return false + } else if in.NodeName != nil { + if *in.NodeName != *other.NodeName { + return false + } + } + + if (in.Zone == nil) != (other.Zone == nil) { + return false + } else if in.Zone != nil { + if *in.Zone != *other.Zone { + return false + } + } + + if (in.Hints == nil) != (other.Hints == nil) { + return false + } else if in.Hints != nil { + if !in.Hints.DeepEqual(other.Hints) { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *EndpointConditions) DeepEqual(other *EndpointConditions) bool { + if other == nil { + return false + } + + if (in.Ready == nil) != (other.Ready == nil) { + return false + } else if in.Ready != nil { + if *in.Ready != *other.Ready { + return false + } + } + + if (in.Serving == nil) != (other.Serving == nil) { + return false + } else if in.Serving != nil { + if *in.Serving != *other.Serving { + return false + } + } + + if (in.Terminating == nil) != (other.Terminating == nil) { + return false + } else if in.Terminating != nil { + if *in.Terminating != *other.Terminating { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *EndpointHints) DeepEqual(other *EndpointHints) bool { + if other == nil { + return false + } + + if ((in.ForZones != nil) && (other.ForZones != nil)) || ((in.ForZones == nil) != (other.ForZones == nil)) { + in, other := &in.ForZones, &other.ForZones + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *EndpointPort) DeepEqual(other *EndpointPort) bool { + if other == nil { + return false + } + + if (in.Name == nil) != (other.Name == nil) { + return false + } else if in.Name != nil { + if *in.Name != *other.Name { + return false + } + } + + if (in.Protocol == nil) != (other.Protocol == nil) { + return false + } else if in.Protocol != nil { + if *in.Protocol != *other.Protocol { + return false + } + } + + if (in.Port == nil) != (other.Port == nil) { + return false + } else if in.Port != nil { + if *in.Port != *other.Port { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *EndpointSlice) DeepEqual(other *EndpointSlice) bool { + if other == nil { + return false + } + + if in.TypeMeta != other.TypeMeta { + return false + } + + if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) { + return false + } + + if in.AddressType != other.AddressType { + return false + } + if ((in.Endpoints != nil) && (other.Endpoints != nil)) || ((in.Endpoints == nil) != (other.Endpoints == nil)) { + in, other := &in.Endpoints, &other.Endpoints + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) { + in, other := &in.Ports, &other.Ports + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *EndpointSliceList) DeepEqual(other *EndpointSliceList) bool { + if other == nil { + return false + } + + if in.TypeMeta != other.TypeMeta { + return false + } + + if !in.ListMeta.DeepEqual(&other.ListMeta) { + return false + } + + if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) { + in, other := &in.Items, &other.Items + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *ForZone) DeepEqual(other *ForZone) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/doc.go new file mode 100644 index 0000000000..67e6a87697 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/doc.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true +// +deepequal-gen=package +// +groupName=discovery.k8s.io + +// Package v1beta1 contains slimmer versions of k8s discovery types. +package v1beta1 diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.pb.go new file mode 100644 index 0000000000..3280b5e8f6 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.pb.go @@ -0,0 +1,1992 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_db2884f8c881be40, []int{0} +} +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(m, src) +} +func (m *Endpoint) XXX_Size() int { + return m.Size() +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *EndpointConditions) Reset() { *m = EndpointConditions{} } +func (*EndpointConditions) ProtoMessage() {} +func (*EndpointConditions) Descriptor() ([]byte, []int) { + return fileDescriptor_db2884f8c881be40, []int{1} +} +func (m *EndpointConditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointConditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointConditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointConditions.Merge(m, src) +} +func (m *EndpointConditions) XXX_Size() int { + return m.Size() +} +func (m *EndpointConditions) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointConditions.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo + +func (m *EndpointHints) Reset() { *m = EndpointHints{} } +func (*EndpointHints) ProtoMessage() {} +func (*EndpointHints) Descriptor() ([]byte, []int) { + return fileDescriptor_db2884f8c881be40, []int{2} +} +func (m *EndpointHints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointHints) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointHints.Merge(m, src) +} +func (m *EndpointHints) XXX_Size() int { + return m.Size() +} +func (m *EndpointHints) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointHints.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointHints proto.InternalMessageInfo + +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { + return fileDescriptor_db2884f8c881be40, []int{3} +} +func (m *EndpointPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointPort.Merge(m, src) +} +func (m *EndpointPort) XXX_Size() int { + return m.Size() +} +func (m *EndpointPort) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointPort.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointPort proto.InternalMessageInfo + +func (m *EndpointSlice) Reset() { *m = EndpointSlice{} } +func (*EndpointSlice) ProtoMessage() {} +func (*EndpointSlice) Descriptor() ([]byte, []int) { + return 
fileDescriptor_db2884f8c881be40, []int{4} +} +func (m *EndpointSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSlice.Merge(m, src) +} +func (m *EndpointSlice) XXX_Size() int { + return m.Size() +} +func (m *EndpointSlice) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo + +func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} } +func (*EndpointSliceList) ProtoMessage() {} +func (*EndpointSliceList) Descriptor() ([]byte, []int) { + return fileDescriptor_db2884f8c881be40, []int{5} +} +func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSliceList.Merge(m, src) +} +func (m *EndpointSliceList) XXX_Size() int { + return m.Size() +} +func (m *EndpointSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo + +func (m *ForZone) Reset() { *m = ForZone{} } +func (*ForZone) ProtoMessage() {} +func (*ForZone) Descriptor() ([]byte, []int) { + return fileDescriptor_db2884f8c881be40, []int{6} +} +func (m *ForZone) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ForZone) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ForZone) XXX_Merge(src proto.Message) { + xxx_messageInfo_ForZone.Merge(m, src) +} +func (m *ForZone) XXX_Size() int { + return m.Size() +} +func (m *ForZone) XXX_DiscardUnknown() { + xxx_messageInfo_ForZone.DiscardUnknown(m) +} + +var xxx_messageInfo_ForZone proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Endpoint)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.Endpoint") + proto.RegisterMapType((map[string]string)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.Endpoint.TopologyEntry") + proto.RegisterType((*EndpointConditions)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.EndpointConditions") + proto.RegisterType((*EndpointHints)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.EndpointHints") + proto.RegisterType((*EndpointPort)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.EndpointPort") + proto.RegisterType((*EndpointSlice)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.EndpointSlice") + proto.RegisterType((*EndpointSliceList)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.EndpointSliceList") + proto.RegisterType((*ForZone)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.ForZone") +} + +func init() { + proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.proto", fileDescriptor_db2884f8c881be40) +} 
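As a hedged usage sketch (not part of the vendored diff): the boilerplate above gives each slim type the standard gogo/protobuf surface (Reset, the XXX_Marshal/XXX_Unmarshal hooks, and registration in init). A minimal round-trip through the discovery/v1 variant of that API could look like the following; the import alias slim_discovery_v1 and the literal values are illustrative only, and the generated Marshal method on the v1 types is assumed by analogy with the v1beta1 marshalers in this file.

package main

import (
	"fmt"

	slim_discovery_v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1"
)

func main() {
	name := "http"
	port := int32(8080)
	in := slim_discovery_v1.EndpointSlice{
		AddressType: slim_discovery_v1.AddressTypeIPv4,
		Endpoints:   []slim_discovery_v1.Endpoint{{Addresses: []string{"10.0.0.1"}}},
		Ports:       []slim_discovery_v1.EndpointPort{{Name: &name, Port: &port}},
	}

	raw, err := in.Marshal() // generated marshaler: sizes, then fills the buffer back-to-front
	if err != nil {
		panic(err)
	}

	var out slim_discovery_v1.EndpointSlice
	if err := out.Unmarshal(raw); err != nil { // generated varint-driven field switch, as above
		panic(err)
	}
	fmt.Println(out.AddressType, len(out.Endpoints), len(out.Ports), out.DeepEqual(&in))
}

The DeepEqual call at the end is the deepequal-gen comparison added for the v1 types earlier in this diff.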
+ +var fileDescriptor_db2884f8c881be40 = []byte{ + // 806 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4f, 0x8b, 0xdb, 0x46, + 0x1c, 0xf5, 0xc4, 0xab, 0xae, 0x34, 0xda, 0xa5, 0xc9, 0xd0, 0x83, 0x59, 0x8a, 0x64, 0x04, 0x05, + 0x43, 0xa8, 0x84, 0x7d, 0x28, 0x4b, 0x0b, 0x4d, 0xa3, 0x76, 0x43, 0x02, 0x8d, 0x13, 0x66, 0x17, + 0x0a, 0xb9, 0xc9, 0xd2, 0x44, 0x3b, 0xb1, 0xa5, 0x11, 0x9a, 0xb1, 0xc1, 0x3d, 0xf5, 0x52, 0x72, + 0x6c, 0x3f, 0x4a, 0x4f, 0x85, 0x7e, 0x83, 0x3d, 0xe6, 0x98, 0x93, 0xe8, 0xaa, 0x5f, 0xa2, 0xe4, + 0x54, 0x66, 0xf4, 0xcf, 0xae, 0x0b, 0x65, 0x77, 0x7d, 0xf2, 0xcc, 0x9b, 0x99, 0xf7, 0xde, 0xfc, + 0x9e, 0x7e, 0x83, 0xe1, 0x8b, 0x98, 0x8a, 0xcb, 0xe5, 0xcc, 0x0d, 0x59, 0xe2, 0x85, 0x74, 0x41, + 0x97, 0xed, 0x4f, 0x36, 0x8f, 0xbd, 0xf9, 0x29, 0xf7, 0xf8, 0x82, 0x26, 0x6a, 0x10, 0x64, 0xd4, + 0x8b, 0x28, 0x0f, 0xd9, 0x8a, 0xe4, 0x6b, 0x6f, 0x35, 0x9e, 0x11, 0x11, 0x8c, 0xbd, 0x98, 0xa4, + 0x24, 0x0f, 0x04, 0x89, 0xdc, 0x2c, 0x67, 0x82, 0xa1, 0x47, 0x1d, 0xa1, 0x5b, 0x31, 0x35, 0x3f, + 0xd9, 0x3c, 0x76, 0xe7, 0xa7, 0xdc, 0x95, 0x84, 0x6a, 0x10, 0x64, 0xd4, 0x6d, 0x09, 0xdd, 0x9a, + 0xf0, 0xe4, 0xec, 0x66, 0x8e, 0x42, 0x96, 0x13, 0x6f, 0xb5, 0xe3, 0xe3, 0xe4, 0xc9, 0x8d, 0x68, + 0xb8, 0x97, 0x10, 0x11, 0xfc, 0x17, 0xcf, 0xe7, 0x1b, 0x3c, 0x31, 0x8b, 0x99, 0xa7, 0xe0, 0xd9, + 0xf2, 0xb5, 0x9a, 0xa9, 0x89, 0x1a, 0xd5, 0xdb, 0x25, 0xa1, 0x4b, 0x99, 0xe4, 0x4c, 0x82, 0xf0, + 0x92, 0xa6, 0xb2, 0x56, 0x52, 0x31, 0x5f, 0xa6, 0x82, 0x26, 0x64, 0x87, 0xff, 0x8b, 0xff, 0x3b, + 0xc0, 0xc3, 0x4b, 0x92, 0x04, 0xff, 0x3e, 0xe7, 0xfc, 0xd6, 0x87, 0xfa, 0x59, 0x1a, 0x65, 0x8c, + 0xa6, 0x02, 0x3d, 0x84, 0x46, 0x10, 0x45, 0x39, 0xe1, 0x9c, 0xf0, 0x01, 0x18, 0xf6, 0x47, 0x86, + 0x7f, 0x5c, 0x16, 0xb6, 0xf1, 0xb8, 0x01, 0x71, 0xb7, 0x8e, 0xde, 0x02, 0x08, 0x43, 0x96, 0x46, + 0x54, 0x50, 0x96, 0xf2, 0xc1, 0xbd, 0x21, 0x18, 0x99, 0x93, 0x73, 0xf7, 0x8e, 0xb9, 0xb9, 0x8d, + 0x99, 0x6f, 0x5b, 0x6a, 0x1f, 0x5d, 0x15, 0x76, 0xaf, 0x2c, 0x6c, 0xd8, 0x61, 0x78, 0x43, 0x1a, + 0xfd, 0x0c, 0xa0, 0x2e, 0x58, 0xc6, 0x16, 0x2c, 0x5e, 0x0f, 0xb4, 0x61, 0x7f, 0x64, 0x4e, 0x7e, + 0xd8, 0x9b, 0x0f, 0xf7, 0xa2, 0x66, 0x3e, 0x4b, 0x45, 0xbe, 0xf6, 0xef, 0xd7, 0x5e, 0xf4, 0x06, + 0xc6, 0xad, 0x34, 0x1a, 0x41, 0x3d, 0x65, 0x11, 0x99, 0x06, 0x09, 0x19, 0x7c, 0x34, 0x04, 0x23, + 0xc3, 0x3f, 0x92, 0x3b, 0xa7, 0x35, 0x86, 0xdb, 0xd5, 0x93, 0xaf, 0xe0, 0xf1, 0x16, 0x2d, 0xba, + 0x0f, 0xfb, 0x73, 0xb2, 0x1e, 0x00, 0x79, 0x0a, 0xcb, 0x21, 0xfa, 0x04, 0x6a, 0xab, 0x60, 0xb1, + 0x24, 0xaa, 0xb0, 0x06, 0xae, 0x26, 0x5f, 0xde, 0x3b, 0x05, 0xce, 0x2f, 0x00, 0xa2, 0xdd, 0x2a, + 0x21, 0x1b, 0x6a, 0x39, 0x09, 0xa2, 0x8a, 0x44, 0xf7, 0x8d, 0xb2, 0xb0, 0x35, 0x2c, 0x01, 0x5c, + 0xe1, 0xe8, 0x33, 0x78, 0xc8, 0x49, 0xbe, 0xa2, 0x69, 0xac, 0x38, 0x75, 0xdf, 0x2c, 0x0b, 0xfb, + 0xf0, 0xbc, 0x82, 0x70, 0xb3, 0x86, 0xc6, 0xd0, 0x14, 0x24, 0x4f, 0x68, 0x1a, 0x08, 0xb9, 0xb5, + 0xaf, 0xb6, 0x7e, 0x5c, 0x16, 0xb6, 0x79, 0xd1, 0xc1, 0x78, 0x73, 0x8f, 0xf3, 0x16, 0xc0, 0xe3, + 0xc6, 0xd1, 0x53, 0x9a, 0x0a, 0x8e, 0x56, 0x50, 0x7f, 0xcd, 0xf2, 0x57, 0x2c, 0xad, 0x3f, 0x24, + 0x73, 0xf2, 0xf4, 0xce, 0x89, 0x3c, 0xa9, 0x08, 0xbb, 0x08, 0x6a, 0x80, 0xe3, 0x56, 0xcb, 0xf9, + 0x1d, 0xc0, 0xa3, 0xc6, 0xc9, 0x4b, 0x96, 0x0b, 0xf4, 0x29, 0x3c, 0x48, 0x65, 0x1e, 0xaa, 0xb2, + 0xbe, 0x5e, 0x16, 0xf6, 0x81, 0xca, 0x42, 0xa1, 0xe8, 0x0d, 0xd4, 0x55, 0x1b, 0x84, 0x6c, 0x51, + 0xd5, 0xd9, 0x9f, 0x4a, 0xe2, 0x97, 0x35, 0xf6, 0xa1, 0xb0, 0xbf, 0xbe, 0xd5, 0x33, 0xe2, 0x36, + 0x0c, 0xb8, 0xe5, 0x97, 
0x4e, 0x32, 0x96, 0x0b, 0x55, 0x50, 0xad, 0x72, 0x22, 0x1d, 0x62, 0x85, + 0x3a, 0x7f, 0xf4, 0xbb, 0x12, 0x9e, 0x2f, 0x68, 0x48, 0x50, 0x0e, 0x75, 0xf9, 0x98, 0x44, 0x81, + 0x08, 0x94, 0x7b, 0x73, 0xe2, 0xdf, 0xac, 0x84, 0xdc, 0x95, 0xe7, 0xdd, 0xd5, 0xd8, 0x7d, 0x31, + 0x7b, 0x43, 0x42, 0xf1, 0x9c, 0x88, 0xa0, 0xeb, 0xa5, 0x0e, 0xc3, 0xad, 0x0e, 0xfa, 0x0e, 0x9a, + 0x75, 0x83, 0x5f, 0xac, 0x33, 0x32, 0x38, 0x50, 0x25, 0x71, 0xea, 0x23, 0xe6, 0xe3, 0x6e, 0xe9, + 0xc3, 0xf6, 0x14, 0x6f, 0x1e, 0x43, 0x3f, 0x42, 0x83, 0xd4, 0x57, 0x91, 0xef, 0x82, 0x4c, 0xff, + 0xd9, 0xde, 0xfa, 0xd1, 0x7f, 0x50, 0xdb, 0x31, 0x1a, 0x84, 0xe3, 0x4e, 0x0e, 0xe5, 0x50, 0x93, + 0xf5, 0xe4, 0x83, 0xbe, 0xd2, 0x7d, 0xbe, 0x37, 0x5d, 0x99, 0x95, 0x7f, 0x5c, 0x6b, 0x6b, 0x72, + 0xc6, 0x71, 0x25, 0xe5, 0xfc, 0x0d, 0xe0, 0x83, 0xad, 0xec, 0xbe, 0xa7, 0x5c, 0xa0, 0x74, 0x27, + 0xbf, 0x6f, 0x6e, 0x9b, 0x9f, 0xe4, 0x53, 0xe9, 0xb5, 0x9f, 0x7e, 0x83, 0x6c, 0x64, 0xc7, 0xa1, + 0x46, 0x05, 0x49, 0x9a, 0x8a, 0x4f, 0xf7, 0x76, 0x73, 0x75, 0xa5, 0xee, 0xea, 0xcf, 0xa4, 0x08, + 0xae, 0xb4, 0x9c, 0x87, 0xf0, 0xb0, 0xee, 0x42, 0x34, 0xdc, 0xea, 0xb4, 0xa3, 0x7a, 0xfb, 0x46, + 0xb7, 0xf9, 0xe4, 0xea, 0xda, 0xea, 0xbd, 0xbb, 0xb6, 0x7a, 0xef, 0xaf, 0xad, 0xde, 0x4f, 0xa5, + 0x05, 0xae, 0x4a, 0x0b, 0xbc, 0x2b, 0x2d, 0xf0, 0xbe, 0xb4, 0xc0, 0x9f, 0xa5, 0x05, 0x7e, 0xfd, + 0xcb, 0xea, 0xbd, 0x7a, 0x74, 0xc7, 0xbf, 0x12, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x0c, + 0x62, 0x8d, 0x8c, 0x08, 0x00, 0x00, +} + +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x32 + } + if len(m.Topology) > 0 { + keysForTopology := make([]string, 0, len(m.Topology)) + for k := range m.Topology { + keysForTopology = append(keysForTopology, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + for iNdEx := len(keysForTopology) - 1; iNdEx >= 0; iNdEx-- { + v := m.Topology[string(keysForTopology[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForTopology[iNdEx]) + copy(dAtA[i:], keysForTopology[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForTopology[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + { + size, err := m.Conditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addresses[iNdEx]) + copy(dAtA[i:], m.Addresses[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EndpointConditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
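For orientation, the byte constants threaded through these marshalers are protobuf wire-format keys: each field is prefixed with (fieldNumber << 3) | wireType, and length-delimited fields carry a base-128 varint length, the same scheme encodeVarintGenerated and sovGenerated below implement. A standalone, illustrative sketch (the helper putVarint is hypothetical, not part of the vendored code) that reproduces the 0x32 key the Endpoint marshaler above writes for NodeName:

package main

import "fmt"

// putVarint appends v as a base-128 varint: low 7 bits per byte,
// continuation bit set on every byte except the last.
func putVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

func main() {
	// Field 6 (nodeName), wire type 2 (length-delimited): key = 6<<3 | 2 = 0x32,
	// the same literal the generated Endpoint marshaler writes for NodeName.
	key := byte(6<<3 | 2)
	payload := "node-1"
	buf := putVarint([]byte{key}, uint64(len(payload)))
	buf = append(buf, payload...)
	fmt.Printf("% x\n", buf) // prints: 32 06 6e 6f 64 65 2d 31
}

The generated code emits these same bytes back-to-front into a pre-sized buffer, which avoids reallocating the slice; the forward-order sketch is only for readability.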
+func (m *EndpointConditions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Terminating != nil { + i-- + if *m.Terminating { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Serving != nil { + i-- + if *m.Serving { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Ready != nil { + i-- + if *m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EndpointHints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointHints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ForZones) > 0 { + for iNdEx := len(m.ForZones) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ForZones[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x18 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0x12 + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.AddressType) + copy(dAtA[i:], m.AddressType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AddressType))) + i-- + dAtA[i] = 0x22 + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndpointSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ForZone) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ForZone) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ForZone) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Endpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Conditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Topology) > 0 { + for k, v := range m.Topology { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointConditions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ready != nil { + n += 2 + } + if m.Serving != nil { + n += 2 + } + if m.Terminating != nil { + n += 2 + } + return n +} + +func (m *EndpointHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ForZones) > 0 { + for _, e := range m.ForZones { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EndpointPort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *EndpointSlice) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AddressType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EndpointSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ForZone) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Endpoint) String() string { + if this == nil { + return "nil" + } + keysForTopology := make([]string, 0, len(this.Topology)) + for k := range this.Topology { + keysForTopology = append(keysForTopology, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + mapStringForTopology := "map[string]string{" + for _, k := range keysForTopology { + mapStringForTopology += fmt.Sprintf("%v: %v,", k, this.Topology[k]) + } + mapStringForTopology += "}" + s := strings.Join([]string{`&Endpoint{`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `Conditions:` + strings.Replace(strings.Replace(this.Conditions.String(), "EndpointConditions", "EndpointConditions", 1), `&`, ``, 1) + `,`, + `Topology:` + mapStringForTopology + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointConditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointConditions{`, + `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `Serving:` + valueToStringGenerated(this.Serving) + `,`, + `Terminating:` + valueToStringGenerated(this.Terminating) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointHints) String() string { + if this == nil { + return "nil" + } + repeatedStringForForZones := "[]ForZone{" + for _, f := range this.ForZones { + repeatedStringForForZones += strings.Replace(strings.Replace(f.String(), "ForZone", "ForZone", 1), `&`, ``, 1) + "," + } + repeatedStringForForZones += "}" + s := strings.Join([]string{`&EndpointHints{`, + `ForZones:` + repeatedStringForForZones + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + valueToStringGenerated(this.Name) + `,`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSlice) String() string { + if this == nil { + return "nil" + } + repeatedStringForEndpoints := "[]Endpoint{" + for _, f := range this.Endpoints { + repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + "," + } + repeatedStringForEndpoints += "}" + repeatedStringForPorts := "[]EndpointPort{" + for _, f := range this.Ports { + repeatedStringForPorts += 
strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&EndpointSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Endpoints:` + repeatedStringForEndpoints + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `AddressType:` + fmt.Sprintf("%v", this.AddressType) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSliceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]EndpointSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EndpointSlice", "EndpointSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EndpointSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ForZone) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ForZone{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Conditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Topology", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Topology == nil { + m.Topology = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Topology[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointConditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointConditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointConditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Ready = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serving", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Serving = &b + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Terminating = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointHints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointHints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointHints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForZones", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ForZones = append(m.ForZones, ForZone{}) + if err := m.ForZones[len(m.ForZones)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoints = append(m.Endpoints, Endpoint{}) + if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddressType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.AddressType = AddressType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, EndpointSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ForZone) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ForZone: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ForZone: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.proto new file mode 100644 index 0000000000..5c4d9aafbf --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.proto @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = "proto2"; + +package github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1; + +import "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto"; +import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1"; + +// Endpoint represents a single logical "backend" implementing a service. +message Endpoint { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. Consumers + // must handle different types of addresses in the context of their own + // capabilities. This must contain at least one address but no more than + // 100. These are all assumed to be fungible and clients may choose to only + // use the first element. Refer to: https://issue.k8s.io/106267 + // +listType=set + repeated string addresses = 1; + + // conditions contains information about the current status of the endpoint. + optional EndpointConditions conditions = 2; + + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. + // +optional + map<string, string> topology = 5; + + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. + // +optional + optional string nodeName = 6; +} + +// EndpointConditions represents the current condition of an endpoint. +message EndpointConditions { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. + // +optional + optional bool ready = 1; + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. + // +optional + optional bool serving = 2; + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. + // +optional + optional bool terminating = 3; +} + +// EndpointHints provides hints describing how an endpoint should be consumed. 
+message EndpointHints { + // forZones indicates the zone(s) this endpoint should be consumed by to + // enable topology aware routing. May contain a maximum of 8 entries. + // +listType=atomic + repeated ForZone forZones = 1; +} + +// EndpointPort represents a Port used by an EndpointSlice +message EndpointPort { + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. + // Name must either be an empty string or pass DNS_LABEL validation: + // * must be no more than 63 characters long. + // * must consist of lower case alphanumeric characters or '-'. + // * must start and end with an alphanumeric character. + // Default is empty string. + optional string name = 1; + + // protocol represents the IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + optional string protocol = 2; + + // port represents the port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + optional int32 port = 3; +} + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +message EndpointSlice { + // Standard object's metadata. + // +optional + optional github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; + + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. This field is + // immutable after creation. The following address types are currently + // supported: + // * IPv4: Represents an IPv4 Address. + // * IPv6: Represents an IPv6 Address. + // * FQDN: Represents a Fully Qualified Domain Name. + optional string addressType = 4; + + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + repeated Endpoint endpoints = 2; + + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + repeated EndpointPort ports = 3; +} + +// EndpointSliceList represents a list of endpoint slices +message EndpointSliceList { + // Standard list metadata. + // +optional + optional github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of endpoint slices + repeated EndpointSlice items = 2; +} + +// ForZone provides information about which zones should consume this endpoint. +message ForZone { + // name represents the name of the zone. 
+ optional string name = 1; +} + diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/register.go new file mode 100644 index 0000000000..9040aba7b9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/register.go @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2019 The Kubernetes Authors. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "discovery.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &EndpointSlice{}, + &EndpointSliceList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/types.go new file mode 100644 index 0000000000..499efe3370 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/types.go @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2019 The Kubernetes Authors. + +package v1beta1 + +import ( + slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 +// +k8s:prerelease-lifecycle-gen:deprecated=1.21 +// +k8s:prerelease-lifecycle-gen:removed=1.25 +// +k8s:prerelease-lifecycle-gen:replacement=discovery.k8s.io,v1,EndpointSlice + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +type EndpointSlice struct { + slim_metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // +optional + slim_metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. This field is + // immutable after creation. 
The following address types are currently + // supported: + // * IPv4: Represents an IPv4 Address. + // * IPv6: Represents an IPv6 Address. + // * FQDN: Represents a Fully Qualified Domain Name. + AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"` +} + +// AddressType represents the type of address referred to by an endpoint. +type AddressType string + +const ( + // AddressTypeIPv4 represents an IPv4 Address. + AddressTypeIPv4 = AddressType(slim_corev1.IPv4Protocol) + + // AddressTypeIPv6 represents an IPv6 Address. + AddressTypeIPv6 = AddressType(slim_corev1.IPv6Protocol) + + // AddressTypeFQDN represents a FQDN. + AddressTypeFQDN = AddressType("FQDN") +) + +// Endpoint represents a single logical "backend" implementing a service. +type Endpoint struct { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. Consumers + // must handle different types of addresses in the context of their own + // capabilities. This must contain at least one address but no more than + // 100. These are all assumed to be fungible and clients may choose to only + // use the first element. Refer to: https://issue.k8s.io/106267 + // +listType=set + Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + + // conditions contains information about the current status of the endpoint. + Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. + // +optional + Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` + + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. + // +optional + NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` +} + +// EndpointConditions represents the current condition of an endpoint. 
+type EndpointConditions struct { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. + // +optional + Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. + // +optional + Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"` + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. + // +optional + Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"` +} + +// EndpointHints provides hints describing how an endpoint should be consumed. +type EndpointHints struct { + // forZones indicates the zone(s) this endpoint should be consumed by to + // enable topology aware routing. May contain a maximum of 8 entries. + // +listType=atomic + ForZones []ForZone `json:"forZones,omitempty" protobuf:"bytes,1,name=forZones"` +} + +// ForZone provides information about which zones should consume this endpoint. +type ForZone struct { + // name represents the name of the zone. + Name string `json:"name" protobuf:"bytes,1,name=name"` +} + +// EndpointPort represents a Port used by an EndpointSlice +type EndpointPort struct { + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. + // Name must either be an empty string or pass DNS_LABEL validation: + // * must be no more than 63 characters long. + // * must consist of lower case alphanumeric characters or '-'. + // * must start and end with an alphanumeric character. + // Default is empty string. + Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` + + // protocol represents the IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + Protocol *slim_corev1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` + + // port represents the port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 +// +k8s:prerelease-lifecycle-gen:deprecated=1.21 +// +k8s:prerelease-lifecycle-gen:removed=1.25 +// +k8s:prerelease-lifecycle-gen:replacement=discovery.k8s.io,v1,EndpointSlice + +// EndpointSliceList represents a list of endpoint slices +type EndpointSliceList struct { + slim_metav1.TypeMeta `json:",inline"` + + // Standard list metadata. 
+ // +optional + slim_metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of endpoint slices + Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/well_known_labels.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/well_known_labels.go new file mode 100644 index 0000000000..b864b949c7 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/well_known_labels.go @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2019 The Kubernetes Authors. + +package v1beta1 + +const ( + // LabelServiceName is used to indicate the name of a Kubernetes service. + LabelServiceName = "kubernetes.io/service-name" + // LabelManagedBy is used to indicate the controller or entity that manages + // an EndpointSlice. This label aims to enable different EndpointSlice + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // EndpointSlices. + LabelManagedBy = "endpointslice.kubernetes.io/managed-by" + // LabelSkipMirror can be set to true on an Endpoints resource to indicate + // that the EndpointSliceMirroring controller should not mirror this + // resource with EndpointSlices. + LabelSkipMirror = "endpointslice.kubernetes.io/skip-mirror" +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..cbfe0d129d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,220 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Conditions.DeepCopyInto(&out.Conditions) + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { + *out = *in + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = new(bool) + **out = **in + } + if in.Serving != nil { + in, out := &in.Serving, &out.Serving + *out = new(bool) + **out = **in + } + if in.Terminating != nil { + in, out := &in.Terminating, &out.Terminating + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions. +func (in *EndpointConditions) DeepCopy() *EndpointConditions { + if in == nil { + return nil + } + out := new(EndpointConditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointHints) DeepCopyInto(out *EndpointHints) { + *out = *in + if in.ForZones != nil { + in, out := &in.ForZones, &out.ForZones + *out = make([]ForZone, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointHints. +func (in *EndpointHints) DeepCopy() *EndpointHints { + if in == nil { + return nil + } + out := new(EndpointHints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(v1.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. +func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice. +func (in *EndpointSlice) DeepCopy() *EndpointSlice { + if in == nil { + return nil + } + out := new(EndpointSlice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSlice) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EndpointSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList. +func (in *EndpointSliceList) DeepCopy() *EndpointSliceList { + if in == nil { + return nil + } + out := new(EndpointSliceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSliceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForZone) DeepCopyInto(out *ForZone) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForZone. +func (in *ForZone) DeepCopy() *ForZone { + if in == nil { + return nil + } + out := new(ForZone) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepequal.go new file mode 100644 index 0000000000..e25dc21f58 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepequal.go @@ -0,0 +1,268 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepequal-gen. DO NOT EDIT. + +package v1beta1 + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *Endpoint) DeepEqual(other *Endpoint) bool { + if other == nil { + return false + } + + if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) { + in, other := &in.Addresses, &other.Addresses + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + if !in.Conditions.DeepEqual(&other.Conditions) { + return false + } + + if ((in.Topology != nil) && (other.Topology != nil)) || ((in.Topology == nil) != (other.Topology == nil)) { + in, other := &in.Topology, &other.Topology + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if inValue != otherValue { + return false + } + } + } + } + } + + if (in.NodeName == nil) != (other.NodeName == nil) { + return false + } else if in.NodeName != nil { + if *in.NodeName != *other.NodeName { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *EndpointConditions) DeepEqual(other *EndpointConditions) bool { + if other == nil { + return false + } + + if (in.Ready == nil) != (other.Ready == nil) { + return false + } else if in.Ready != nil { + if *in.Ready != *other.Ready { + return false + } + } + + if (in.Serving == nil) != (other.Serving == nil) { + return false + } else if in.Serving != nil { + if *in.Serving != *other.Serving { + return false + } + } + + if (in.Terminating == nil) != (other.Terminating == nil) { + return false + } else if in.Terminating != nil { + if *in.Terminating != *other.Terminating { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *EndpointHints) DeepEqual(other *EndpointHints) bool { + if other == nil { + return false + } + + if ((in.ForZones != nil) && (other.ForZones != nil)) || ((in.ForZones == nil) != (other.ForZones == nil)) { + in, other := &in.ForZones, &other.ForZones + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *EndpointPort) DeepEqual(other *EndpointPort) bool { + if other == nil { + return false + } + + if (in.Name == nil) != (other.Name == nil) { + return false + } else if in.Name != nil { + if *in.Name != *other.Name { + return false + } + } + + if (in.Protocol == nil) != (other.Protocol == nil) { + return false + } else if in.Protocol != nil { + if *in.Protocol != *other.Protocol { + return false + } + } + + if (in.Port == nil) != (other.Port == nil) { + return false + } else if in.Port != nil { + if *in.Port != *other.Port { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *EndpointSlice) DeepEqual(other *EndpointSlice) bool { + if other == nil { + return false + } + + if in.TypeMeta != other.TypeMeta { + return false + } + + if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) { + return false + } + + if in.AddressType != other.AddressType { + return false + } + if ((in.Endpoints != nil) && (other.Endpoints != nil)) || ((in.Endpoints == nil) != (other.Endpoints == nil)) { + in, other := &in.Endpoints, &other.Endpoints + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) { + in, other := &in.Ports, &other.Ports + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
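// Illustrative note, not generated code: DeepEqual is nil-sensitive, so a nil
// pointer field and a pointer to the zero value compare as different. Given
// the checks above (hypothetical variables, not part of this patch):
//
//	f := false
//	a := &EndpointConditions{Ready: nil}
//	b := &EndpointConditions{Ready: &f}
//	_ = a.DeepEqual(b) // false: (in.Ready == nil) != (other.Ready == nil)
//
// This makes the generated comparison suitable for change detection, where
// "unset" and "explicitly false" must not be conflated.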
+func (in *EndpointSliceList) DeepEqual(other *EndpointSliceList) bool { + if other == nil { + return false + } + + if in.TypeMeta != other.TypeMeta { + return false + } + + if !in.ListMeta.DeepEqual(&other.ListMeta) { + return false + } + + if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) { + in, other := &in.Items, &other.Items + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *ForZone) DeepEqual(other *ForZone) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/doc.go new file mode 100644 index 0000000000..269888d36d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/doc.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +deepequal-gen=package +// +groupName=networking.k8s.io + +// Package v1 contains slimmer versions of k8s networking types. +package v1 diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.pb.go new file mode 100644 index 0000000000..bd02e24d06 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.pb.go @@ -0,0 +1,2195 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + intstr "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_5d3be2d57d520df2, []int{0} +} +func (m *IPBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPBlock.Merge(m, src) +} +func (m *IPBlock) XXX_Size() int { + return m.Size() +} +func (m *IPBlock) XXX_DiscardUnknown() { + xxx_messageInfo_IPBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_IPBlock proto.InternalMessageInfo + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_5d3be2d57d520df2, []int{1} +} +func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicy.Merge(m, src) +} +func (m *NetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo + +func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } +func (*NetworkPolicyEgressRule) ProtoMessage() {} +func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_5d3be2d57d520df2, []int{2} +} +func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src) +} +func (m *NetworkPolicyEgressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo + +func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } +func (*NetworkPolicyIngressRule) ProtoMessage() {} +func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_5d3be2d57d520df2, []int{3} +} +func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src) +} +func (m *NetworkPolicyIngressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyIngressRule 
proto.InternalMessageInfo + +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_5d3be2d57d520df2, []int{4} +} +func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyList.Merge(m, src) +} +func (m *NetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo + +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_5d3be2d57d520df2, []int{5} +} +func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPeer.Merge(m, src) +} +func (m *NetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo + +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { + return fileDescriptor_5d3be2d57d520df2, []int{6} +} +func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPort.Merge(m, src) +} +func (m *NetworkPolicyPort) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPort) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo + +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5d3be2d57d520df2, []int{7} +} +func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicySpec.Merge(m, src) +} +func (m *NetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo + +func init() { + 
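// Illustrative note, not generated code: the RegisterType calls below record
// each slim message in the gogo/protobuf registry under its full Cilium
// package path, so the slim types never collide with the upstream k8s.io
// registrations of the same message names. If needed, a registered type can
// be looked up by that name (assumed gogo/protobuf API):
//
//	t := proto.MessageType("github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IPBlock")
//	_ = t // reflect.Type of *IPBlock, or nil if not registered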
proto.RegisterType((*IPBlock)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IPBlock") + proto.RegisterType((*NetworkPolicy)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicy") + proto.RegisterType((*NetworkPolicyEgressRule)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyEgressRule") + proto.RegisterType((*NetworkPolicyIngressRule)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyIngressRule") + proto.RegisterType((*NetworkPolicyList)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyList") + proto.RegisterType((*NetworkPolicyPeer)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyPeer") + proto.RegisterType((*NetworkPolicyPort)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyPort") + proto.RegisterType((*NetworkPolicySpec)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicySpec") +} + +func init() { + proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.proto", fileDescriptor_5d3be2d57d520df2) +} + +var fileDescriptor_5d3be2d57d520df2 = []byte{ + // 838 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x56, 0xcf, 0x6f, 0xe4, 0x34, + 0x14, 0x9e, 0x64, 0xa6, 0x9d, 0xae, 0xcb, 0xb2, 0xd4, 0x08, 0x31, 0x5a, 0x89, 0xa4, 0x8a, 0x84, + 0xb4, 0x17, 0x1c, 0x95, 0x03, 0x5a, 0x69, 0x25, 0x40, 0x61, 0x67, 0xd1, 0x2c, 0xd0, 0x8e, 0xdc, + 0x72, 0xe1, 0x96, 0xc9, 0x78, 0x53, 0x77, 0x92, 0x38, 0xb2, 0x3d, 0x65, 0x2b, 0xed, 0x61, 0x39, + 0x21, 0x21, 0x21, 0xf1, 0x67, 0xf5, 0xb8, 0x12, 0x97, 0x3d, 0x45, 0x34, 0xfc, 0x0b, 0x70, 0xe9, + 0x09, 0xd9, 0x71, 0x26, 0xf3, 0x63, 0x11, 0x9a, 0x76, 0x56, 0x9c, 0x12, 0xbf, 0xd8, 0xdf, 0xf7, + 0xbd, 0xe7, 0xcf, 0xcf, 0x01, 0xdf, 0xc4, 0x54, 0x9e, 0x4e, 0x47, 0x28, 0x62, 0xa9, 0x1f, 0xd1, + 0x84, 0x4e, 0x67, 0x8f, 0x7c, 0x12, 0xfb, 0x93, 0x87, 0xc2, 0x17, 0x09, 0x4d, 0xf5, 0x4b, 0x98, + 0x53, 0x3f, 0x23, 0xf2, 0x47, 0xc6, 0x27, 0x34, 0x8b, 0xfd, 0xf3, 0x03, 0x3f, 0x26, 0x19, 0xe1, + 0xa1, 0x24, 0x63, 0x94, 0x73, 0x26, 0x19, 0x7c, 0xd4, 0x80, 0xa1, 0x0a, 0xa5, 0x7e, 0xe4, 0x93, + 0x18, 0x4d, 0x1e, 0x0a, 0xa4, 0xc0, 0xf4, 0x4b, 0x98, 0x53, 0xd4, 0x80, 0xa1, 0xf3, 0x83, 0xfb, + 0xfd, 0xf5, 0x94, 0x44, 0x8c, 0x93, 0x37, 0x68, 0xb8, 0xff, 0x64, 0x2d, 0x18, 0xe1, 0xa7, 0x44, + 0x86, 0x6f, 0xc2, 0x79, 0xba, 0x26, 0xce, 0x54, 0xd2, 0xc4, 0xa7, 0x99, 0x14, 0x92, 0xaf, 0x60, + 0x7d, 0x32, 0x87, 0x15, 0xb3, 0x98, 0xf9, 0x3a, 0x3c, 0x9a, 0x3e, 0xd3, 0x23, 0x3d, 0xd0, 0x6f, + 0x66, 0xba, 0x02, 0x45, 0x94, 0x29, 0xdc, 0x34, 0x8c, 0x4e, 0x69, 0x46, 0xf8, 0x85, 0x66, 0xe5, + 0xd3, 0x4c, 0xd2, 0x94, 0xac, 0xe0, 0x7f, 0xf6, 0x5f, 0x0b, 0x44, 0x74, 0x4a, 0xd2, 0x70, 0x79, + 0x9d, 0x77, 0x04, 0xba, 0x83, 0x61, 0x90, 0xb0, 0x68, 0x02, 0xf7, 0x41, 0x27, 0xa2, 0x63, 0xde, + 0xb3, 0xf6, 0xad, 0x07, 0x77, 0x82, 0x77, 0x2e, 0x0b, 0xb7, 0x55, 0x16, 0x6e, 0xe7, 0xab, 0xc1, + 0x63, 0x8c, 0xf5, 0x17, 0xe8, 0x81, 0x6d, 0xf2, 0x3c, 0x22, 0xb9, 0xec, 0xd9, 0xfb, 0xed, 0x07, + 0x77, 0x02, 0x50, 0x16, 0xee, 0x76, 0x5f, 0x47, 0xb0, 0xf9, 0xe2, 0xfd, 0x6d, 0x81, 0xbb, 0x87, + 0xd5, 0xae, 0x0e, 0x59, 0x42, 0xa3, 0x0b, 0xc8, 0xc1, 0x8e, 0xaa, 0xf0, 0x38, 0x94, 0xa1, 0xc6, + 0xde, 0xfd, 0x34, 0x40, 0x6b, 0xb9, 0x44, 0x20, 0xb5, 0x1e, 0x9d, 0x1f, 0xa0, 0xa3, 0xd1, 0x19, + 0x89, 0xe4, 0x77, 0x44, 0x86, 0x01, 0x34, 
0xfa, 0x40, 0x13, 0xc3, 0x33, 0x1e, 0x98, 0x83, 0x8e, + 0xc8, 0x49, 0xd4, 0xb3, 0x35, 0xdf, 0x21, 0xba, 0x85, 0x2b, 0xd1, 0x42, 0x36, 0xc7, 0x39, 0x89, + 0x9a, 0xda, 0xa8, 0x11, 0xd6, 0x4c, 0xde, 0xb5, 0x05, 0x3e, 0x5c, 0x98, 0xd9, 0x8f, 0x39, 0x11, + 0x02, 0x4f, 0x13, 0x02, 0x05, 0xd8, 0xca, 0x19, 0x97, 0xa2, 0x67, 0xed, 0xb7, 0x37, 0x2b, 0x67, + 0xc8, 0xb8, 0x0c, 0xee, 0x1a, 0x39, 0x5b, 0x6a, 0x24, 0x70, 0xc5, 0x05, 0xcf, 0x80, 0x2d, 0x99, + 0xde, 0xa8, 0xcd, 0x32, 0x12, 0xc2, 0x03, 0x60, 0x18, 0xed, 0x13, 0x86, 0x6d, 0xc9, 0xbc, 0x9f, + 0x6c, 0xd0, 0x5b, 0x98, 0x35, 0xc8, 0xfe, 0xe7, 0xec, 0x73, 0xd0, 0x79, 0xc6, 0x59, 0xfa, 0x96, + 0xf2, 0x9f, 0x19, 0xe0, 0x09, 0x67, 0x29, 0xd6, 0x4c, 0xde, 0x5f, 0x16, 0xd8, 0x5b, 0x98, 0xf9, + 0x2d, 0x15, 0x12, 0x66, 0x2b, 0xe6, 0xff, 0xf2, 0xa6, 0xe6, 0x57, 0x78, 0xda, 0xfa, 0xef, 0x19, + 0xf6, 0x9d, 0x3a, 0x32, 0x67, 0x7c, 0x06, 0xb6, 0xa8, 0x24, 0xa9, 0x30, 0x89, 0x3f, 0xdd, 0x5c, + 0xe2, 0x4d, 0xa1, 0x07, 0x8a, 0x00, 0x57, 0x3c, 0xde, 0xcf, 0xed, 0xa5, 0xb4, 0x55, 0x81, 0xe0, + 0x73, 0xb0, 0x9b, 0xb3, 0xf1, 0x31, 0x49, 0x48, 0x24, 0x19, 0x37, 0x99, 0xf7, 0x6f, 0x9c, 0x79, + 0x38, 0x22, 0x49, 0x0d, 0x16, 0xdc, 0x2b, 0x0b, 0x77, 0x77, 0xd8, 0xa0, 0xe3, 0x79, 0x2a, 0xf8, + 0x8b, 0x05, 0xf6, 0xb2, 0x30, 0x25, 0x22, 0x0f, 0x23, 0x32, 0x13, 0x60, 0x6f, 0x52, 0xc0, 0x07, + 0x65, 0xe1, 0xee, 0x1d, 0x2e, 0x73, 0xe0, 0x55, 0x5a, 0x38, 0x01, 0x5d, 0x9a, 0xeb, 0xee, 0xda, + 0x6b, 0x6b, 0x05, 0x8f, 0x6f, 0xb5, 0x1f, 0xa6, 0x53, 0x07, 0xbb, 0x65, 0xe1, 0xd6, 0x6d, 0x1b, + 0xd7, 0x0c, 0xde, 0xaf, 0xf6, 0xf2, 0x4e, 0x30, 0x2e, 0xe1, 0x19, 0xd8, 0xd1, 0x9d, 0x3e, 0x62, + 0x89, 0xe9, 0xec, 0x87, 0xca, 0x3a, 0x43, 0x13, 0xbb, 0x2e, 0xdc, 0xcf, 0x6f, 0x74, 0xeb, 0xa2, + 0x1a, 0x01, 0xcf, 0xf0, 0x21, 0x01, 0x1d, 0x75, 0xfa, 0x4c, 0xb5, 0xbf, 0x5e, 0xb7, 0xda, 0xea, + 0xfe, 0x44, 0xd5, 0xfd, 0x89, 0x06, 0x99, 0x3c, 0xe2, 0xc7, 0x92, 0xd3, 0x2c, 0x0e, 0x76, 0xd4, + 0x49, 0x53, 0xc9, 0x60, 0x0d, 0x0f, 0x3f, 0x06, 0x5d, 0x92, 0x8d, 0x55, 0x40, 0x57, 0x75, 0xab, + 0xaa, 0x47, 0xbf, 0x0a, 0xe1, 0xfa, 0x9b, 0xf7, 0xfb, 0xb2, 0x33, 0x55, 0xb7, 0x86, 0x2f, 0xde, + 0xa2, 0x33, 0xdf, 0x37, 0x27, 0xe4, 0xdf, 0xdd, 0xf9, 0xd2, 0x02, 0x5d, 0x5a, 0xf5, 0x46, 0x73, + 0x42, 0xbf, 0xdf, 0xdc, 0x09, 0x9d, 0x6b, 0xba, 0xc1, 0x3d, 0x23, 0xa5, 0x5b, 0x07, 0x6b, 0x5a, + 0xf8, 0x02, 0x6c, 0x93, 0x4a, 0x40, 0x5b, 0x0b, 0x38, 0xd9, 0x9c, 0x80, 0xe6, 0xca, 0x0b, 0xde, + 0x35, 0xfc, 0xdb, 0x26, 0x66, 0x38, 0xe1, 0x17, 0xaa, 0xfc, 0x6a, 0xee, 0xc9, 0x45, 0x4e, 0x44, + 0xaf, 0xa3, 0xff, 0x23, 0x3e, 0xaa, 0x6a, 0x36, 0x0b, 0x5f, 0x17, 0x2e, 0x68, 0x86, 0x78, 0x7e, + 0x45, 0x10, 0x5e, 0x5e, 0x39, 0xad, 0x57, 0x57, 0x4e, 0xeb, 0xf5, 0x95, 0xd3, 0x7a, 0x59, 0x3a, + 0xd6, 0x65, 0xe9, 0x58, 0xaf, 0x4a, 0xc7, 0x7a, 0x5d, 0x3a, 0xd6, 0x1f, 0xa5, 0x63, 0xfd, 0xf6, + 0xa7, 0xd3, 0xfa, 0xe1, 0xd1, 0x2d, 0xfe, 0x69, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x1c, 0xf7, + 0x43, 0xc0, 0x11, 0x0b, 0x00, 0x00, +} + +func (m *IPBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Except) > 0 { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.To) > 0 { + for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.From) > 0 { + for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func 
(m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IPBlock != nil { + { + size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.PodSelector != nil { + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.EndPort != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.EndPort)) + i-- + dAtA[i] = 0x18 + } + if m.Port != nil { + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PolicyTypes) > 0 { + for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= 
len(m.PolicyTypes[iNdEx]) + copy(dAtA[i:], m.PolicyTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Egress) > 0 { + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Except) > 0 { + for _, s := range m.Except { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.To) > 0 { + for _, e := range m.To { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PodSelector != nil { + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPBlock != nil { + l = m.IPBlock.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkPolicyPort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EndPort != nil { + n += 1 + sovGenerated(uint64(*m.EndPort)) + } + return n +} + +func (m *NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int 
+ _ = l + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Egress) > 0 { + for _, e := range m.Egress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *IPBlock) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPBlock{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `Except:` + fmt.Sprintf("%v", this.Except) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyEgressRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForTo += "}" + s := strings.Join([]string{`&NetworkPolicyEgressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyIngressRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForFrom += "}" + s := strings.Join([]string{`&NetworkPolicyIngressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyPeer) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&NetworkPolicyPeer{`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyPort{`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, + `EndPort:` + valueToStringGenerated(this.EndPort) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" + s := strings.Join([]string{`&NetworkPolicySpec{`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, + `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *IPBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CIDR = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.To = append(m.To, NetworkPolicyPeer{}) + if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = 
append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodSelector == nil { + m.PodSelector = &v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPBlock == nil { + m.IPBlock = &IPBlock{} + } + if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { 
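// Illustrative note, not generated code: each iteration below first decodes
// one varint key, where key = (field number << 3) | wire type. For example,
// the single byte 0x12 yields field 2 with wire type 2 (length-delimited),
// which is why the marshalers above emit dAtA[i] = 0x12 before
// length-prefixed submessages:
//
//	wire := uint64(0x12)
//	fieldNum := int32(wire >> 3) // 2
//	wireType := int(wire & 0x7)  // 2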
+ preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &intstr.IntOrString{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EndPort = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.proto new file mode 100644 index 0000000000..c96ffa7d4e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.proto @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1; + +import "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto"; +import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto"; +import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"; + +// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed +// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs +// that should not be included within this rule. +message IPBlock { + // cidr is a string representing the IPBlock + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" + optional string cidr = 1; + + // except is a slice of CIDRs that should not be included within an IPBlock + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" + // Except values will be rejected if they are outside the cidr range + // +optional + repeated string except = 2; +} + +// NetworkPolicy describes what network traffic is allowed for a set of Pods +message NetworkPolicy { + // Standard object's metadata. 
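All of the generated Unmarshal functions above, as well as skipGenerated, are built from the same base-128 varint loop. As a hedged, standard-library-only sketch (not part of the vendored sources), the following isolates that loop and shows how a key varint splits into field number and wire type:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the generated inner loops: accumulate 7 payload bits
// per byte, least-significant group first, until a byte lacks the 0x80
// continuation bit.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64") // cf. ErrIntOverflowGenerated
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF") // cf. io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	v, n, _ := decodeVarint([]byte{0x96, 0x01}) // wire encoding of 150
	fmt.Println(v, n)                           // 150 2

	// A key packs fieldNum<<3 | wireType: 0x12 is field 2 with wire type 2
	// (length-delimited), e.g. the Port field handled in case 2 above.
	key, _, _ := decodeVarint([]byte{0x12})
	fmt.Printf("field=%d wiretype=%d\n", key>>3, key&0x7) // field=2 wiretype=2
}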
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; + + // spec represents the specification of the desired behavior for this NetworkPolicy. + // +optional + optional NetworkPolicySpec spec = 2; +} + +// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. +// This type is beta-level in 1.8 +message NetworkPolicyEgressRule { + // ports is a list of destination ports for outgoing traffic. + // Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + repeated NetworkPolicyPort ports = 1; + + // to is a list of destinations for outgoing traffic of pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all destinations (traffic not restricted by + // destination). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the to list. + // +optional + repeated NetworkPolicyPeer to = 2; +} + +// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. +message NetworkPolicyIngressRule { + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + repeated NetworkPolicyPort ports = 1; + + // from is a list of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all sources (traffic not restricted by + // source). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the from list. + // +optional + repeated NetworkPolicyPeer from = 2; +} + +// NetworkPolicyList is a list of NetworkPolicy objects. +message NetworkPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of schema objects. + repeated NetworkPolicy items = 2; +} + +// NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of +// fields are allowed +message NetworkPolicyPeer { + // podSelector is a label selector which selects pods. This field follows standard label + // selector semantics; if present but empty, it selects all pods. 
+ // + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. + // +optional + optional github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.LabelSelector podSelector = 1; + + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. + // + // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. + // +optional + optional github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.LabelSelector namespaceSelector = 2; + + // ipBlock defines policy on a particular IPBlock. If this field is set then + // neither of the other fields can be. + // +optional + optional IPBlock ipBlock = 3; +} + +// NetworkPolicyPort describes a port to allow traffic on +message NetworkPolicyPort { + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. + // +optional + optional string protocol = 1; + + // port represents the port on the given protocol. This can either be a numerical or named + // port on a pod. If this field is not provided, this matches all port names and + // numbers. + // If present, only traffic on the specified protocol AND port will be matched. + // +optional + optional github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.util.intstr.IntOrString port = 2; + + // endPort indicates that the range of ports from port to endPort if set, inclusive, + // should be allowed by the policy. This field cannot be defined if the port field + // is not defined or if the port field is defined as a named (string) port. + // The endPort must be equal or greater than port. + // +optional + optional int32 endPort = 3; +} + +// NetworkPolicySpec provides the specification of a NetworkPolicy +message NetworkPolicySpec { + // podSelector selects the pods to which this NetworkPolicy object applies. + // The array of ingress rules is applied to any pods selected by this field. + // Multiple network policies can select the same set of pods. In this case, + // the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + optional github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.LabelSelector podSelector = 1; + + // ingress is a list of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod + // (and cluster policy otherwise allows the traffic), OR if the traffic source is + // the pod's local node, OR if the traffic matches at least one ingress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy does not allow any traffic (and serves + // solely to ensure that the pods it selects are isolated by default) + // +optional + repeated NetworkPolicyIngressRule ingress = 2; + + // egress is a list of egress rules to be applied to the selected pods. 
Outgoing traffic + // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // otherwise allows the traffic), OR if the traffic matches at least one egress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves + // solely to ensure that the pods it selects are isolated by default). + // This field is beta-level in 1.8 + // +optional + repeated NetworkPolicyEgressRule egress = 3; + + // policyTypes is a list of rule types that the NetworkPolicy relates to. + // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. + // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. + // Likewise, if you want to write a policy that specifies that no egress is allowed, + // you must specify a policyTypes value that includes "Egress" (since such a policy would not include + // an egress section and would otherwise default to just [ "Ingress" ]). + // This field is beta-level in 1.8 + // +optional + repeated string policyTypes = 4; +} + diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/register.go new file mode 100644 index 0000000000..db5db6022e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/register.go @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2017 The Kubernetes Authors. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "networking.k8s.io" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
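addKnownTypes itself follows next. As a hedged usage sketch (not part of the vendored sources), this is how the registration hooks above are typically consumed; the import path is assumed to match the module path in go.mod (written here as github.com/cilium/cilium/..., whereas the vendor tree in this patch uses a github.com mirror path):

package main

import (
	"fmt"

	slim_networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// AddToScheme runs every registered builder function, i.e. addKnownTypes.
	scheme := runtime.NewScheme()
	if err := slim_networkingv1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme can now resolve the slim Go type to its GroupVersionKind.
	gvks, _, err := scheme.ObjectKinds(&slim_networkingv1.NetworkPolicy{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // [networking.k8s.io/v1, Kind=NetworkPolicy]
}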
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &NetworkPolicy{}, + &NetworkPolicyList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/types.go new file mode 100644 index 0000000000..99d749e1dc --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/types.go @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2017 The Kubernetes Authors. + +package v1 + +import ( + slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetworkPolicy describes what network traffic is allowed for a set of Pods +type NetworkPolicy struct { + slim_metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + slim_metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec represents the specification of the desired behavior for this NetworkPolicy. + // +optional + Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is tombstoned to show why 3 is a reserved protobuf tag. + // This commented field should remain, so in the future if we decide to reimplement + // NetworkPolicyStatus a different protobuf name and tag SHOULD be used! + // Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PolicyType string describes the NetworkPolicy type +// This type is beta-level in 1.8 +// +enum +type PolicyType string + +const ( + // PolicyTypeIngress is a NetworkPolicy that affects ingress traffic on selected pods + PolicyTypeIngress PolicyType = "Ingress" + // PolicyTypeEgress is a NetworkPolicy that affects egress traffic on selected pods + PolicyTypeEgress PolicyType = "Egress" +) + +// NetworkPolicySpec provides the specification of a NetworkPolicy +type NetworkPolicySpec struct { + // podSelector selects the pods to which this NetworkPolicy object applies. + // The array of ingress rules is applied to any pods selected by this field. + // Multiple network policies can select the same set of pods. In this case, + // the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + PodSelector slim_metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` + + // ingress is a list of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod + // (and cluster policy otherwise allows the traffic), OR if the traffic source is + // the pod's local node, OR if the traffic matches at least one ingress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. 
If + // this field is empty then this NetworkPolicy does not allow any traffic (and serves + // solely to ensure that the pods it selects are isolated by default) + // +optional + Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` + + // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic + // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // otherwise allows the traffic), OR if the traffic matches at least one egress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves + // solely to ensure that the pods it selects are isolated by default). + // This field is beta-level in 1.8 + // +optional + Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` + + // policyTypes is a list of rule types that the NetworkPolicy relates to. + // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. + // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. + // Likewise, if you want to write a policy that specifies that no egress is allowed, + // you must specify a policyTypes value that include "Egress" (since such a policy would not include + // an egress section and would otherwise default to just [ "Ingress" ]). + // This field is beta-level in 1.8 + // +optional + PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` +} + +// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. +type NetworkPolicyIngressRule struct { + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` + + // from is a list of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all sources (traffic not restricted by + // source). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the from list. + // +optional + From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` +} + +// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. +// This type is beta-level in 1.8 +type NetworkPolicyEgressRule struct { + // ports is a list of destination ports for outgoing traffic. 
+ // Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` + + // to is a list of destinations for outgoing traffic of pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all destinations (traffic not restricted by + // destination). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the to list. + // +optional + To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"` +} + +// NetworkPolicyPort describes a port to allow traffic on +type NetworkPolicyPort struct { + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. + // +optional + Protocol *slim_corev1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"` + + // port represents the port on the given protocol. This can either be a numerical or named + // port on a pod. If this field is not provided, this matches all port names and + // numbers. + // If present, only traffic on the specified protocol AND port will be matched. + // +optional + Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` + + // endPort indicates that the range of ports from port to endPort if set, inclusive, + // should be allowed by the policy. This field cannot be defined if the port field + // is not defined or if the port field is defined as a named (string) port. + // The endPort must be equal or greater than port. + // +optional + EndPort *int32 `json:"endPort,omitempty" protobuf:"bytes,3,opt,name=endPort"` +} + +// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed +// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs +// that should not be included within this rule. +type IPBlock struct { + // cidr is a string representing the IPBlock + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" + CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` + + // except is a slice of CIDRs that should not be included within an IPBlock + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" + // Except values will be rejected if they are outside the cidr range + // +optional + Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` +} + +// NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of +// fields are allowed +type NetworkPolicyPeer struct { + // podSelector is a label selector which selects pods. This field follows standard label + // selector semantics; if present but empty, it selects all pods. + // + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. 
+ // +optional + PodSelector *slim_metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` + + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. + // + // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. + // +optional + NamespaceSelector *slim_metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` + + // ipBlock defines policy on a particular IPBlock. If this field is set then + // neither of the other fields can be. + // +optional + IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetworkPolicyList is a list of NetworkPolicy objects. +type NetworkPolicyList struct { + slim_metav1.TypeMeta `json:",inline"` + + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + slim_metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of schema objects. + Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/well_known_annotations.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/well_known_annotations.go new file mode 100644 index 0000000000..136d2456aa --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/well_known_annotations.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2020 The Kubernetes Authors. + +package v1 + +const ( + // AnnotationIsDefaultIngressClass can be used to indicate that an + // IngressClass should be considered default. When a single IngressClass + // resource has this annotation set to true, new Ingress resources without a + // class specified will be assigned this default class. + AnnotationIsDefaultIngressClass = "ingressclass.kubernetes.io/is-default-class" +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..3e9b954348 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepcopy.go @@ -0,0 +1,255 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + intstr "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
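The generated deep-copy helpers for these types continue below. First, a hedged construction sketch (not part of the vendored sources; import paths assumed as above) showing the semantics the field comments describe: an empty podSelector selects every pod in the namespace, and a deny-all-egress policy must name Egress explicitly:

package main

import (
	"fmt"

	slim_networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
	slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
)

func main() {
	spec := slim_networkingv1.NetworkPolicySpec{
		// Empty selector: applies to all pods in the policy's namespace.
		PodSelector: slim_metav1.LabelSelector{},
		// No egress rules plus an explicit Egress policy type means
		// "isolate egress entirely"; omitting PolicyTypes here would
		// default the policy to affecting ingress only.
		PolicyTypes: []slim_networkingv1.PolicyType{slim_networkingv1.PolicyTypeEgress},
	}
	fmt.Printf("%+v\n", spec)
}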
+func (in *IPBlock) DeepCopyInto(out *IPBlock) { + *out = *in + if in.Except != nil { + in, out := &in.Except, &out.Except + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock. +func (in *IPBlock) DeepCopy() *IPBlock { + if in == nil { + return nil + } + out := new(IPBlock) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy. +func (in *NetworkPolicy) DeepCopy() *NetworkPolicy { + if in == nil { + return nil + } + out := new(NetworkPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.To != nil { + in, out := &in.To, &out.To + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyEgressRule. +func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule { + if in == nil { + return nil + } + out := new(NetworkPolicyEgressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyIngressRule. +func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { + if in == nil { + return nil + } + out := new(NetworkPolicyIngressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList. 
+func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList { + if in == nil { + return nil + } + out := new(NetworkPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) { + *out = *in + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.IPBlock != nil { + in, out := &in.IPBlock, &out.IPBlock + *out = new(IPBlock) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPeer. +func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer { + if in == nil { + return nil + } + out := new(NetworkPolicyPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(corev1.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(intstr.IntOrString) + **out = **in + } + if in.EndPort != nil { + in, out := &in.EndPort, &out.EndPort + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPort. +func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort { + if in == nil { + return nil + } + out := new(NetworkPolicyPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) { + *out = *in + in.PodSelector.DeepCopyInto(&out.PodSelector) + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]NetworkPolicyIngressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]NetworkPolicyEgressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PolicyTypes != nil { + in, out := &in.PolicyTypes, &out.PolicyTypes + *out = make([]PolicyType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec. 
+func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { + if in == nil { + return nil + } + out := new(NetworkPolicySpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepequal.go new file mode 100644 index 0000000000..eec20108d6 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepequal.go @@ -0,0 +1,317 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepequal-gen. DO NOT EDIT. + +package v1 + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *IPBlock) DeepEqual(other *IPBlock) bool { + if other == nil { + return false + } + + if in.CIDR != other.CIDR { + return false + } + if ((in.Except != nil) && (other.Except != nil)) || ((in.Except == nil) != (other.Except == nil)) { + in, other := &in.Except, &other.Except + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *NetworkPolicy) DeepEqual(other *NetworkPolicy) bool { + if other == nil { + return false + } + + if in.TypeMeta != other.TypeMeta { + return false + } + + if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) { + return false + } + + if !in.Spec.DeepEqual(&other.Spec) { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *NetworkPolicyEgressRule) DeepEqual(other *NetworkPolicyEgressRule) bool { + if other == nil { + return false + } + + if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) { + in, other := &in.Ports, &other.Ports + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + if ((in.To != nil) && (other.To != nil)) || ((in.To == nil) != (other.To == nil)) { + in, other := &in.To, &other.To + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *NetworkPolicyIngressRule) DeepEqual(other *NetworkPolicyIngressRule) bool { + if other == nil { + return false + } + + if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) { + in, other := &in.Ports, &other.Ports + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + if ((in.From != nil) && (other.From != nil)) || ((in.From == nil) != (other.From == nil)) { + in, other := &in.From, &other.From + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *NetworkPolicyList) DeepEqual(other *NetworkPolicyList) bool { + if other == nil { + return false + } + + if in.TypeMeta != other.TypeMeta { + return false + } + + if !in.ListMeta.DeepEqual(&other.ListMeta) { + return false + } + + if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) { + in, other := &in.Items, &other.Items + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *NetworkPolicyPeer) DeepEqual(other *NetworkPolicyPeer) bool { + if other == nil { + return false + } + + if (in.PodSelector == nil) != (other.PodSelector == nil) { + return false + } else if in.PodSelector != nil { + if !in.PodSelector.DeepEqual(other.PodSelector) { + return false + } + } + + if (in.NamespaceSelector == nil) != (other.NamespaceSelector == nil) { + return false + } else if in.NamespaceSelector != nil { + if !in.NamespaceSelector.DeepEqual(other.NamespaceSelector) { + return false + } + } + + if (in.IPBlock == nil) != (other.IPBlock == nil) { + return false + } else if in.IPBlock != nil { + if !in.IPBlock.DeepEqual(other.IPBlock) { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *NetworkPolicyPort) DeepEqual(other *NetworkPolicyPort) bool { + if other == nil { + return false + } + + if (in.Protocol == nil) != (other.Protocol == nil) { + return false + } else if in.Protocol != nil { + if *in.Protocol != *other.Protocol { + return false + } + } + + if (in.Port == nil) != (other.Port == nil) { + return false + } else if in.Port != nil { + if !in.Port.DeepEqual(other.Port) { + return false + } + } + + if (in.EndPort == nil) != (other.EndPort == nil) { + return false + } else if in.EndPort != nil { + if *in.EndPort != *other.EndPort { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
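The last of these helpers, NetworkPolicySpec's DeepEqual, follows. As a hedged sketch (not part of the vendored sources; import path assumed as above) of how DeepCopy and DeepEqual interact: the copy shares no backing arrays with the original, so mutating it breaks equality without touching the source value:

package main

import (
	"fmt"

	slim_networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
)

func main() {
	orig := &slim_networkingv1.NetworkPolicySpec{
		PolicyTypes: []slim_networkingv1.PolicyType{slim_networkingv1.PolicyTypeIngress},
	}

	cp := orig.DeepCopy()
	fmt.Println(orig.DeepEqual(cp)) // true: structurally identical

	cp.PolicyTypes[0] = slim_networkingv1.PolicyTypeEgress
	fmt.Println(orig.DeepEqual(cp))  // false
	fmt.Println(orig.PolicyTypes[0]) // Ingress: the original is untouched
}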
+func (in *NetworkPolicySpec) DeepEqual(other *NetworkPolicySpec) bool { + if other == nil { + return false + } + + if !in.PodSelector.DeepEqual(&other.PodSelector) { + return false + } + + if ((in.Ingress != nil) && (other.Ingress != nil)) || ((in.Ingress == nil) != (other.Ingress == nil)) { + in, other := &in.Ingress, &other.Ingress + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + if ((in.Egress != nil) && (other.Egress != nil)) || ((in.Egress == nil) != (other.Egress == nil)) { + in, other := &in.Egress, &other.Egress + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + if ((in.PolicyTypes != nil) && (other.PolicyTypes != nil)) || ((in.PolicyTypes == nil) != (other.PolicyTypes == nil)) { + in, other := &in.PolicyTypes, &other.PolicyTypes + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..ba34515351 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..cc6ff2709f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/register.go @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + apiextensionsv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + apiextensionsv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/apiextensions_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/apiextensions_client.go new file mode 100644 index 0000000000..5f5e04bd9a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/apiextensions_client.go @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme" + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1" + rest "k8s.io/client-go/rest" +) + +type ApiextensionsV1Interface interface { + RESTClient() rest.Interface + CustomResourceDefinitionsGetter +} + +// ApiextensionsV1Client is used to interact with features provided by the apiextensions.k8s.io group. +type ApiextensionsV1Client struct { + restClient rest.Interface +} + +func (c *ApiextensionsV1Client) CustomResourceDefinitions() CustomResourceDefinitionInterface { + return newCustomResourceDefinitions(c) +} + +// NewForConfig creates a new ApiextensionsV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ApiextensionsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ApiextensionsV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ApiextensionsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ApiextensionsV1Client{client}, nil +} + +// NewForConfigOrDie creates a new ApiextensionsV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ApiextensionsV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ApiextensionsV1Client for the given RESTClient. 
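New itself follows. As a hedged end-to-end sketch (not part of the vendored sources; the kubeconfig path is hypothetical and the import path is assumed as above), this is the usual way the typed client is built and used; NewForConfig applies setConfigDefaults, so callers never set the API path or serializer by hand:

package main

import (
	"context"
	"fmt"

	slim_apiextv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config") // hypothetical path
	if err != nil {
		panic(err)
	}
	client, err := slim_apiextv1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	crds, err := client.CustomResourceDefinitions().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("CRDs:", len(crds.Items))
}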
+func New(c rest.Interface) *ApiextensionsV1Client { + return &ApiextensionsV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ApiextensionsV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/customresourcedefinition.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/customresourcedefinition.go new file mode 100644 index 0000000000..d251544ef5 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/customresourcedefinition.go @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme" + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface. +// A group's client should implement this interface. +type CustomResourceDefinitionsGetter interface { + CustomResourceDefinitions() CustomResourceDefinitionInterface +} + +// CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. 
+type CustomResourceDefinitionInterface interface { + Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (*v1.CustomResourceDefinition, error) + Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CustomResourceDefinition, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CustomResourceDefinitionList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) + CustomResourceDefinitionExpansion +} + +// customResourceDefinitions implements CustomResourceDefinitionInterface +type customResourceDefinitions struct { + client rest.Interface +} + +// newCustomResourceDefinitions returns a CustomResourceDefinitions +func newCustomResourceDefinitions(c *ApiextensionsV1Client) *customResourceDefinitions { + return &customResourceDefinitions{ + client: c.RESTClient(), + } +} + +// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. +func (c *customResourceDefinitions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. +func (c *customResourceDefinitions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CustomResourceDefinitionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CustomResourceDefinitionList{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested customResourceDefinitions. +func (c *customResourceDefinitions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Post(). + Resource("customresourcedefinitions"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. +func (c *customResourceDefinitions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("customresourcedefinitions"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *customResourceDefinitions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("customresourcedefinitions"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched customResourceDefinition. +func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Patch(pt). + Resource("customresourcedefinitions"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/doc.go new file mode 100644 index 0000000000..50cfbd485a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/generated_expansion.go new file mode 100644 index 0000000000..2ea7378ea9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/generated_expansion.go @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +type CustomResourceDefinitionExpansion interface{} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-clientset/clientset.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-clientset/clientset.go new file mode 100644 index 0000000000..e15d6ef49a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-clientset/clientset.go @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright The Kubernetes Authors. + +package clientset + +import ( + "fmt" + "net/http" + + apiextclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" + + slim_apiextensionsv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1" +) + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *apiextclientset.Clientset + apiextensionsV1beta1 *apiextensionsv1beta1.ApiextensionsV1beta1Client + apiextensionsV1 *apiextensionsv1.ApiextensionsV1Client +} + +// ApiextensionsV1 retrieves the ApiextensionsV1Client +func (c *Clientset) ApiextensionsV1() apiextensionsv1.ApiextensionsV1Interface { + return c.apiextensionsV1 +} + +// ApiextensionsV1beta1 retrieves the ApiextensionsV1beta1Client +func (c *Clientset) ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface { + return c.apiextensionsV1beta1 +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.Clientset, err = apiextclientset.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + // Wrap extensionsV1 with our own implementation + extensionsV1, err := slim_apiextensionsv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.apiextensionsV1 = apiextensionsv1.New(extensionsV1.RESTClient()) + + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.Clientset = apiextclientset.NewForConfigOrDie(c) + + // Wrap extensionsV1 with our own implementation + cs.apiextensionsV1 = apiextensionsv1.New(slim_apiextensionsv1.NewForConfigOrDie(c).RESTClient()) + + return &cs +} + +// New creates a new Clientset for the given RESTClient. 
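New follows below. The QPS/Burst defaulting performed by NewForConfigAndClient above is worth isolating; a hedged client-go sketch (not part of the vendored sources) of the same rule:

package main

import (
	"fmt"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	cfg := rest.Config{QPS: 5, Burst: 10}

	// Same rule NewForConfigAndClient applies: if QPS is set but no
	// RateLimiter is configured, build a token-bucket limiter (a
	// non-positive Burst would instead be rejected with an error).
	if cfg.RateLimiter == nil && cfg.QPS > 0 {
		cfg.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(cfg.QPS, cfg.Burst)
	}
	fmt.Println("rate limiter configured:", cfg.RateLimiter != nil) // true
}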
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.Clientset = apiextclientset.New(c) + + // Wrap extensionsV1 with our own implementation + cs.apiextensionsV1 = apiextensionsv1.New(slim_apiextensionsv1.New(c).RESTClient()) + + return &cs +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/doc.go new file mode 100644 index 0000000000..c54b4a560b --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/doc.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2019 The Kubernetes Authors. + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +groupName=apiextensions.k8s.io + +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.pb.go new file mode 100644 index 0000000000..c53e29423d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.pb.go @@ -0,0 +1,568 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } +func (*CustomResourceDefinition) ProtoMessage() {} +func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_2ae25e910fba1c55, []int{0} +} +func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinition.Merge(m, src) +} +func (m *CustomResourceDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo + +func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } +func (*CustomResourceDefinitionList) ProtoMessage() {} +func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { + return fileDescriptor_2ae25e910fba1c55, []int{1} +} +func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionList.Merge(m, src) +} +func (m *CustomResourceDefinitionList) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionList) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionList.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CustomResourceDefinition)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.CustomResourceDefinition") + proto.RegisterType((*CustomResourceDefinitionList)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.CustomResourceDefinitionList") +} + +func init() { + proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.proto", fileDescriptor_2ae25e910fba1c55) +} + +var fileDescriptor_2ae25e910fba1c55 = []byte{ + // 378 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0x4f, 0x8b, 0xda, 0x40, + 0x18, 0x87, 0x33, 0x16, 0x41, 0x22, 0x85, 0x92, 0x93, 0x48, 0x19, 0xc5, 0x93, 0x97, 0x4e, 0xd0, + 0x43, 0xf1, 0x66, 0x49, 0x4b, 0xa1, 0xd0, 0x22, 0x78, 0x6b, 0x6f, 0x63, 0x7c, 0x8d, 0xd3, 0x38, + 0x33, 0x21, 0x33, 0x91, 0xdd, 0xcb, 0xb2, 0x9f, 0x40, 0xf6, 0x63, 0x79, 0xf4, 0xe8, 0x49, 0xd6, + 0xec, 0x17, 0x59, 0x66, 0xfc, 0xb7, 0xbb, 0xae, 0x2c, 0xae, 0x97, 0xcc, 0x3b, 0x2f, 0x79, 0x9f, + 0xdf, 0x33, 0x21, 0xe3, 0xf6, 0x22, 0xa6, 0xc7, 0xd9, 0x80, 0x84, 0x92, 0xfb, 0x21, 0x9b, 0xb0, + 0x6c, 0xbf, 0x24, 0x71, 0xe4, 0xc7, 0x1d, 0xe5, 0xab, 0x09, 0xe3, 0xb6, 0xa0, 0x09, 0xb3, 0x0f, + 0xb8, 0xd2, 0x20, 0x14, 0x93, 0x42, 0xf9, 0xd3, 0x96, 0x1f, 0x81, 0x80, 0x94, 0x6a, 0x18, 0x92, + 0x24, 0x95, 0x5a, 0x7a, 0xdd, 0x03, 0x90, 0x6c, 0x48, 0xbb, 0x25, 0x89, 0x23, 0x12, 0x77, 0x14, + 0x31, 0x40, 0x5b, 
0x18, 0x20, 0x79, 0x06, 0x24, 0xd3, 0x56, 0xf5, 0xe7, 0x99, 0x46, 0x1c, 0x34, + 0x7d, 0x45, 0xa4, 0xfa, 0xe5, 0x09, 0x27, 0x92, 0x91, 0xf4, 0x6d, 0x7b, 0x90, 0x8d, 0xec, 0xce, + 0x6e, 0x6c, 0xb5, 0x7d, 0xdd, 0x00, 0x09, 0x93, 0x86, 0xc9, 0x69, 0x38, 0x66, 0x02, 0xd2, 0x6b, + 0x9b, 0x98, 0x66, 0x42, 0x33, 0x0e, 0x47, 0xfc, 0xaf, 0x6f, 0x0d, 0xa8, 0x70, 0x0c, 0x9c, 0xbe, + 0x9c, 0x6b, 0xcc, 0x90, 0x5b, 0xf9, 0x9e, 0x29, 0x2d, 0x79, 0x1f, 0x94, 0xcc, 0xd2, 0x10, 0x7e, + 0xc0, 0x88, 0x09, 0xa6, 0x99, 0x14, 0x5e, 0xea, 0x96, 0xcc, 0x79, 0x86, 0x54, 0xd3, 0x0a, 0xaa, + 0xa3, 0x66, 0xb9, 0x1d, 0x90, 0x33, 0x3f, 0xa8, 0x99, 0x27, 0xd3, 0x16, 0xe9, 0x0d, 0xfe, 0x43, + 0xa8, 0xff, 0x80, 0xa6, 0x81, 0x37, 0x5f, 0xd5, 0x9c, 0x7c, 0x55, 0x73, 0x0f, 0xbd, 0xfe, 0x3e, + 0xa7, 0x31, 0x2b, 0xb8, 0x9f, 0x4f, 0x09, 0xfd, 0x66, 0x4a, 0x7b, 0xe2, 0x48, 0xea, 0xdb, 0x7b, + 0xa5, 0x0c, 0xcf, 0x2a, 0x7d, 0xda, 0x2a, 0x95, 0x76, 0x9d, 0x83, 0x90, 0x77, 0xe3, 0x16, 0x99, + 0x06, 0xae, 0x2a, 0x85, 0xfa, 0x87, 0x66, 0xb9, 0xfd, 0x97, 0x5c, 0xf8, 0x4b, 0x91, 0x53, 0xa7, + 0x0b, 0x3e, 0x6e, 0x2d, 0x8a, 0xbf, 0x4c, 0x5e, 0x7f, 0x13, 0x1b, 0xc0, 0x7c, 0x8d, 0x9d, 0xc5, + 0x1a, 0x3b, 0xcb, 0x35, 0x76, 0x6e, 0x73, 0x8c, 0xe6, 0x39, 0x46, 0x8b, 0x1c, 0xa3, 0x65, 0x8e, + 0xd1, 0x7d, 0x8e, 0xd1, 0xdd, 0x03, 0x76, 0xfe, 0x75, 0x2f, 0xbc, 0x39, 0x8f, 0x01, 0x00, 0x00, + 0xff, 0xff, 0xe9, 0x04, 0x05, 0xb8, 0x7b, 0x03, 0x00, 0x00, +} + +func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CustomResourceDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l 
+ sovGenerated(uint64(l)) + return n +} + +func (m *CustomResourceDefinitionList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CustomResourceDefinition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinition{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CustomResourceDefinition{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinition", "CustomResourceDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CustomResourceDefinitionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx 
< l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomResourceDefinition{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: 
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.proto new file mode 100644 index 0000000000..a78a196617 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.proto @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1; + +import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1"; + +// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format +// <.spec.name>.<.spec.group>. +message CustomResourceDefinition { + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; +} + +// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. +message CustomResourceDefinitionList { + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; + + // items list individual CustomResourceDefinition objects + repeated CustomResourceDefinition items = 2; +} + diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/register.go new file mode 100644 index 0000000000..71fba678c3 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/register.go @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2017 The Kubernetes Authors. 
+ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "apiextensions.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CustomResourceDefinition{}, + &CustomResourceDefinitionList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/types.go new file mode 100644 index 0000000000..9be5bf3e25 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/types.go @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2019 The Kubernetes Authors. + +package v1 + +import ( + metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format +// <.spec.name>.<.spec.group>. +type CustomResourceDefinition struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. 
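+//
+// A short illustrative sketch (crdList is a hypothetical value of this
+// type): the slim variant carries only metadata, so consumers typically
+// iterate names rather than full CRD specs:
+//
+//	for _, crd := range crdList.Items {
+//		fmt.Println(crd.Name) // Name comes from the embedded slim ObjectMeta
+//	}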
+type CustomResourceDefinitionList struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items list individual CustomResourceDefinition objects + Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..0acce338eb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/zz_generated.deepcopy.go @@ -0,0 +1,72 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinition. +func (in *CustomResourceDefinition) DeepCopy() *CustomResourceDefinition { + if in == nil { + return nil + } + out := new(CustomResourceDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomResourceDefinition) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomResourceDefinition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionList. +func (in *CustomResourceDefinitionList) DeepCopy() *CustomResourceDefinitionList { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomResourceDefinitionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/doc.go new file mode 100644 index 0000000000..b55d23db59 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/doc.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2017 The Kubernetes Authors. 
+ +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=meta + +package v1beta1 diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.pb.go new file mode 100644 index 0000000000..b419723c87 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.pb.go @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } +func (*PartialObjectMetadataList) ProtoMessage() {} +func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { + return fileDescriptor_1a84ae209524fd15, []int{0} +} +func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) +} +func (m *PartialObjectMetadataList) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) +} + +var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*PartialObjectMetadataList)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1beta1.PartialObjectMetadataList") +} + +func init() { + proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.proto", fileDescriptor_1a84ae209524fd15) +} + +var fileDescriptor_1a84ae209524fd15 = []byte{ + // 322 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0x3f, 0x4f, 0x02, 0x31, + 0x18, 0x87, 0xaf, 0x1a, 0x12, 0x72, 0xc4, 0xc4, 0x30, 0x21, 0x43, 0x21, 0x4e, 0x2c, 0xb6, 0x81, + 0xc1, 0x90, 0xb0, 0x18, 0x06, 0x13, 0xa3, 0x44, 0xc3, 0xe8, 0xf6, 0xde, 0x51, 0x8f, 0x7a, 0xf4, + 0xee, 0x72, 0x7d, 0xcf, 0xc4, 0xcd, 0x8f, 0xe0, 0xc7, 0x62, 0x64, 0x64, 0x22, 0x52, 0x3f, 0x88, + 0xa6, 0xe5, 0x40, 0x43, 0x9c, 0x70, 0x7a, 0xff, 0xa4, 0x79, 0x9e, 0x5f, 0x9b, 0xfa, 0xb7, 0x91, + 0xc4, 0x69, 0x11, 0xb0, 0x30, 0x55, 0x3c, 0x94, 0x33, 0x59, 0xec, 0x4a, 0x16, 0x47, 0x3c, 0xee, + 0x6b, 0xae, 0x67, 0x52, 0xb9, 0x06, 0x32, 0xa9, 0xb9, 0x12, 0x08, 0xfc, 0xa5, 0x1b, 0x08, 0x84, + 0x2e, 
0x8f, 0x44, 0x22, 0x72, 0x40, 0x31, 0x61, 0x59, 0x9e, 0x62, 0x5a, 0x1f, 0xfc, 0xc0, 0xd8, + 0x86, 0xb2, 0x2d, 0x59, 0x1c, 0xb1, 0xb8, 0xaf, 0x99, 0x85, 0xb9, 0xc6, 0xc2, 0x98, 0x85, 0xb1, + 0x12, 0xd6, 0xbc, 0x3e, 0x2c, 0xc9, 0x7e, 0x88, 0xe6, 0xc5, 0x2f, 0x4e, 0x94, 0x46, 0x29, 0x77, + 0xeb, 0xa0, 0x78, 0x72, 0x93, 0x1b, 0x5c, 0x57, 0x1e, 0xbf, 0xb4, 0x69, 0x64, 0x6a, 0x99, 0x0a, + 0xc2, 0xa9, 0x4c, 0x44, 0xfe, 0xea, 0x8c, 0x79, 0x91, 0xa0, 0x54, 0x82, 0xeb, 0x70, 0x2a, 0x14, + 0xec, 0x6b, 0xce, 0xbf, 0x88, 0x7f, 0xf6, 0x00, 0x39, 0x4a, 0x98, 0xdd, 0x07, 0xcf, 0x22, 0xc4, + 0x91, 0x40, 0x98, 0x00, 0xc2, 0x9d, 0xd4, 0x58, 0x4f, 0xfc, 0xaa, 0x2a, 0xe7, 0xc6, 0x51, 0x9b, + 0x74, 0x6a, 0xbd, 0x2b, 0x76, 0xd8, 0xe3, 0x30, 0xcb, 0xb3, 0xec, 0xe1, 0xe9, 0x7c, 0xd5, 0xf2, + 0xcc, 0xaa, 0x55, 0xdd, 0x6e, 0xc6, 0x3b, 0x47, 0x3d, 0xf7, 0x2b, 0x12, 0x85, 0xd2, 0x0d, 0xd2, + 0x3e, 0xee, 0xd4, 0x7a, 0xa3, 0x43, 0x65, 0x7f, 0xde, 0x68, 0x78, 0x52, 0x9a, 0x2b, 0x37, 0xd6, + 0x31, 0xde, 0xa8, 0x86, 0x30, 0x5f, 0x53, 0x6f, 0xb1, 0xa6, 0xde, 0x72, 0x4d, 0xbd, 0x37, 0x43, + 0xc9, 0xdc, 0x50, 0xb2, 0x30, 0x94, 0x2c, 0x0d, 0x25, 0x1f, 0x86, 0x92, 0xf7, 0x4f, 0xea, 0x3d, + 0x0e, 0xfe, 0xf1, 0xc1, 0xbe, 0x03, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xe4, 0x91, 0x70, 0x9e, 0x02, + 0x00, 0x00, +} + +func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PartialObjectMetadataList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PartialObjectMetadataList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PartialObjectMetadata{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + 
return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, v1.PartialObjectMetadata{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.proto new file mode 100644 index 0000000000..be61fe3a0a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.proto @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1beta1; + +import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1"; + +// PartialObjectMetadataList contains a list of objects containing only their metadata. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadataList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 2; + + // items contains each of the included items. + repeated github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.PartialObjectMetadata items = 1; +} + diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/register.go new file mode 100644 index 0000000000..5d62e06556 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/register.go @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2017 The Kubernetes Authors. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// AddMetaToScheme registers base meta types into schemas. 
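+//
+// A minimal sketch of the intended call pattern, assuming a freshly
+// constructed scheme (illustrative only):
+//
+//	scheme := runtime.NewScheme()
+//	if err := AddMetaToScheme(scheme); err != nil {
+//		// handle the registration error
+//	}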
+func AddMetaToScheme(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PartialObjectMetadata{}, + &PartialObjectMetadataList{}, + ) + + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/types.go new file mode 100644 index 0000000000..aa1a77b82f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/types.go @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2017 The Kubernetes Authors. + +// package v1beta1 is alpha objects from meta that will be introduced. +package v1beta1 + +import ( + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" +) + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadata = v1.PartialObjectMetadata + +// IMPORTANT: PartialObjectMetadataList has different protobuf field ids in v1beta1 than +// v1 because ListMeta was accidentally omitted prior to 1.15. Therefore this type must +// remain independent of v1.PartialObjectMetadataList to preserve mappings. + +// PartialObjectMetadataList contains a list of objects containing only their metadata. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadataList struct { + v1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"` + + // items contains each of the included items. + Items []v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..d3b949f0dd --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,47 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.PartialObjectMetadata, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList. 
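+//
+// Illustrative note (list is a hypothetical value): the returned copy is
+// fully independent of the receiver, including its Items slice:
+//
+//	out := list.DeepCopy()
+//	out.Items = nil // list.Items is unchanged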
+func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList { + if in == nil { + return nil + } + out := new(PartialObjectMetadataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/doc.go new file mode 100644 index 0000000000..3955e271b0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/doc.go @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// +deepequal-gen=package + +// Package types contains slimmer versions of k8s types. +// +groupName=util +package intstr diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.pb.go new file mode 100644 index 0000000000..376a6c0f87 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.pb.go @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto + +package intstr + +import ( + fmt "fmt" + + io "io" + math "math" + math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} +func (*IntOrString) Descriptor() ([]byte, []int) { + return fileDescriptor_8984be45904ea297, []int{0} +} +func (m *IntOrString) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IntOrString) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntOrString.Merge(m, src) +} +func (m *IntOrString) XXX_Size() int { + return m.Size() +} +func (m *IntOrString) XXX_DiscardUnknown() { + xxx_messageInfo_IntOrString.DiscardUnknown(m) +} + +var xxx_messageInfo_IntOrString proto.InternalMessageInfo + +func init() { + proto.RegisterType((*IntOrString)(nil), "github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.util.intstr.IntOrString") +} + +func init() { + proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto", fileDescriptor_8984be45904ea297) +} + +var fileDescriptor_8984be45904ea297 = []byte{ + // 293 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xf2, 0x4a, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcc, 0xc9, 0x2c, 0x85, 0x53, 0x05, 0xd9, + 0xe9, 0xfa, 0xd9, 0x16, 0xc5, 0xfa, 0xc5, 0x39, 0x99, 0xb9, 0x60, 0x46, 0x62, 0x41, 0x66, 0xb1, + 0x7e, 0x69, 0x49, 0x66, 0x8e, 0x7e, 0x66, 0x5e, 0x49, 0x71, 0x49, 0x91, 0x7e, 0x7a, 0x6a, 0x5e, + 0x6a, 0x51, 0x62, 0x49, 0x6a, 0x8a, 0x5e, 0x41, 0x51, 0x7e, 0x49, 0xbe, 0x90, 0x15, 0xc2, 0x2c, + 0x3d, 0x88, 0x21, 0x30, 0xaa, 0x20, 0x3b, 0x5d, 0x2f, 0xdb, 0xa2, 0x58, 0x0f, 0x64, 0x16, 0x98, + 0x01, 0x32, 0x4b, 0x0f, 0x64, 0x96, 0x1e, 0xc4, 0x2c, 0x29, 0x5d, 0x24, 0x77, 0xa4, 0xe7, 0xa7, + 0xe7, 0xeb, 0x83, 0x8d, 0x4c, 0x2a, 0x4d, 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0x62, 0x95, 0xd2, + 0x44, 0x46, 0x2e, 0x6e, 0xcf, 0xbc, 0x12, 0xff, 0xa2, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x21, + 0x0d, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x66, 0x27, 0x91, 0x13, + 0xf7, 0xe4, 0x19, 0x1e, 0xdd, 0x93, 0x67, 0x09, 0xa9, 0x2c, 0x48, 0xfd, 0x05, 0xa5, 0x83, 0xc0, + 0x2a, 0x84, 0xd4, 0xb8, 0xd8, 0x32, 0xf3, 0x4a, 0xc2, 0x12, 0x73, 0x24, 0x98, 0x14, 0x18, 0x35, + 0x58, 0x9d, 0xf8, 0xa0, 0x6a, 0xd9, 0x3c, 0xc1, 0xa2, 0x41, 0x50, 0x59, 0x90, 0xba, 0xe2, 0x92, + 0x22, 0x90, 0x3a, 0x66, 0x05, 0x46, 0x0d, 0x4e, 0x84, 0xba, 0x60, 0xb0, 0x68, 0x10, 0x54, 0xd6, + 0x8a, 0x63, 0xc6, 0x02, 0x79, 0x86, 0x86, 0x3b, 0x0a, 0x0c, 0x4e, 0x09, 0x27, 0x1e, 0xca, 0x31, + 0x5c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, + 0x18, 0x2f, 0x3c, 0x92, 0x63, 0xbc, 0xf1, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, + 0xe5, 0x18, 0xa2, 0xac, 0xc8, 0x0f, 0x70, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0x17, 0x78, + 0x08, 0xad, 0x01, 0x00, 0x00, +} + +func (m *IntOrString) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = 
l + i -= len(m.StrVal) + copy(dAtA[i:], m.StrVal) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *IntOrString) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Type)) + n += 1 + sovGenerated(uint64(m.IntVal)) + l = len(m.StrVal) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *IntOrString) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntOrString: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType) + } + m.IntVal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IntVal |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StrVal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < 
l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto new file mode 100644 index 0000000000..2d929fd185 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.aaakk.us.kg.cilium.cilium.pkg.k8s.slim.k8s.apis.util.intstr; + +// Package-wide variables from generator "generated". +option go_package = "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr"; + +// IntOrString is a type that can hold an int32 or a string. When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. This allows you to have, for example, a JSON field that can +// accept a name or number. +// TODO: Rename to Int32OrString +// +// +protobuf=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +message IntOrString { + optional int64 type = 1; + + optional int32 intVal = 2; + + optional string strVal = 3; +} + diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/intstr.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/intstr.go new file mode 100644 index 0000000000..ca2f03b6d1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/intstr.go @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2014 The Kubernetes Authors. + +package intstr + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "runtime/debug" + "strconv" + "strings" + + "k8s.io/klog/v2" +) + +// IntOrString is a type that can hold an int32 or a string. 
When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. This allows you to have, for example, a JSON field that can +// accept a name or number. +// TODO: Rename to Int32OrString +// +// +protobuf=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +type IntOrString struct { + Type Type `protobuf:"varint,1,opt,name=type,casttype=Type"` + IntVal int32 `protobuf:"varint,2,opt,name=intVal"` + StrVal string `protobuf:"bytes,3,opt,name=strVal"` +} + +// Type represents the stored type of IntOrString. +type Type int64 + +const ( + Int Type = iota // The IntOrString holds an int. + String // The IntOrString holds a string. +) + +// FromInt creates an IntOrString object with an int32 value. It is +// your responsibility not to call this method with a value greater +// than int32. +// Deprecated: use FromInt32 instead. +func FromInt(val int) IntOrString { + if val > math.MaxInt32 || val < math.MinInt32 { + klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) + } + return IntOrString{Type: Int, IntVal: int32(val)} +} + +// FromInt32 creates an IntOrString object with an int32 value. +func FromInt32(val int32) IntOrString { + return IntOrString{Type: Int, IntVal: val} +} + +// FromString creates an IntOrString object with a string value. +func FromString(val string) IntOrString { + return IntOrString{Type: String, StrVal: val} +} + +// Parse the given string and try to convert it to an int32 integer before +// setting it as a string value. +func Parse(val string) IntOrString { + i, err := strconv.ParseInt(val, 10, 32) + if err != nil { + return FromString(val) + } + return FromInt32(int32(i)) +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (intstr *IntOrString) UnmarshalJSON(value []byte) error { + if value[0] == '"' { + intstr.Type = String + return json.Unmarshal(value, &intstr.StrVal) + } + intstr.Type = Int + return json.Unmarshal(value, &intstr.IntVal) +} + +// String returns the string value, or the Itoa of the int value. +func (intstr *IntOrString) String() string { + if intstr == nil { + return "" + } + if intstr.Type == String { + return intstr.StrVal + } + return strconv.Itoa(intstr.IntValue()) +} + +// IntValue returns the IntVal if type Int, or if +// it is a String, will attempt a conversion to int, +// returning 0 if a parsing error occurs. +func (intstr *IntOrString) IntValue() int { + if intstr.Type == String { + i, _ := strconv.Atoi(intstr.StrVal) + return i + } + return int(intstr.IntVal) +} + +// MarshalJSON implements the json.Marshaller interface. +func (intstr IntOrString) MarshalJSON() ([]byte, error) { + switch intstr.Type { + case Int: + return json.Marshal(intstr.IntVal) + case String: + return json.Marshal(intstr.StrVal) + default: + return []byte{}, fmt.Errorf("impossible IntOrString.Type") + } +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } + +// OpenAPIV3OneOfTypes is used by the kube-openapi generator when constructing +// the OpenAPI v3 spec of this type. 
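+// For IntOrString the result below is [integer, string], so a generated v3
+// schema accepts either JSON form for the same field, e.g. both
+// `"port": 8080` and `"port": "http"` (hypothetical field name).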
+func (IntOrString) OpenAPIV3OneOfTypes() []string { return []string{"integer", "string"} } + +func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString { + if intOrPercent == nil { + return &defaultValue + } + return intOrPercent +} + +// GetScaledValueFromIntOrPercent is meant to replace GetValueFromIntOrPercent. +// This method returns a scaled value from an IntOrString type. If the IntOrString +// is a percentage string value it's treated as a percentage and scaled appropriately +// in accordance to the total, if it's an int value it's treated as a simple value and +// if it is a string value which is either non-numeric or numeric but lacking a trailing '%' it returns an error. +func GetScaledValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { + if intOrPercent == nil { + return 0, errors.New("nil value for IntOrString") + } + value, isPercent, err := getIntOrPercentValueSafely(intOrPercent) + if err != nil { + return 0, fmt.Errorf("invalid value for IntOrString: %v", err) + } + if isPercent { + if roundUp { + value = int(math.Ceil(float64(value) * (float64(total)) / 100)) + } else { + value = int(math.Floor(float64(value) * (float64(total)) / 100)) + } + } + return value, nil +} + +// GetValueFromIntOrPercent was deprecated in favor of +// GetScaledValueFromIntOrPercent. This method was treating all int as a numeric value and all +// strings with or without a percent symbol as a percentage value. +// Deprecated +func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { + if intOrPercent == nil { + return 0, errors.New("nil value for IntOrString") + } + value, isPercent, err := getIntOrPercentValue(intOrPercent) + if err != nil { + return 0, fmt.Errorf("invalid value for IntOrString: %v", err) + } + if isPercent { + if roundUp { + value = int(math.Ceil(float64(value) * (float64(total)) / 100)) + } else { + value = int(math.Floor(float64(value) * (float64(total)) / 100)) + } + } + return value, nil +} + +// getIntOrPercentValue is a legacy function and only meant to be called by GetValueFromIntOrPercent +// For a more correct implementation call getIntOrPercentSafely +func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) { + switch intOrStr.Type { + case Int: + return intOrStr.IntValue(), false, nil + case String: + s := strings.Replace(intOrStr.StrVal, "%", "", -1) + v, err := strconv.Atoi(s) + if err != nil { + return 0, false, fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err) + } + return int(v), true, nil + } + return 0, false, fmt.Errorf("invalid type: neither int nor percentage") +} + +func getIntOrPercentValueSafely(intOrStr *IntOrString) (int, bool, error) { + switch intOrStr.Type { + case Int: + return intOrStr.IntValue(), false, nil + case String: + isPercent := false + s := intOrStr.StrVal + if strings.HasSuffix(s, "%") { + isPercent = true + s = strings.TrimSuffix(intOrStr.StrVal, "%") + } else { + return 0, false, fmt.Errorf("invalid type: string is not a percentage") + } + v, err := strconv.Atoi(s) + if err != nil { + return 0, false, fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err) + } + return int(v), isPercent, nil + } + return 0, false, fmt.Errorf("invalid type: neither int nor percentage") +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/zz_generated.deepequal.go new file mode 100644 index 
0000000000..c9cb4ad583 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/zz_generated.deepequal.go @@ -0,0 +1,29 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepequal-gen. DO NOT EDIT. + +package intstr + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *IntOrString) DeepEqual(other *IntOrString) bool { + if other == nil { + return false + } + + if in.Type != other.Type { + return false + } + if in.IntVal != other.IntVal { + return false + } + if in.StrVal != other.StrVal { + return false + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/clientset.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/clientset.go new file mode 100644 index 0000000000..516fa061d2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/clientset.go @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + "net/http" + + corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1" + discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1" + discoveryv1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1" + networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + CoreV1() corev1.CoreV1Interface + DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface + DiscoveryV1() discoveryv1.DiscoveryV1Interface + NetworkingV1() networkingv1.NetworkingV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + coreV1 *corev1.CoreV1Client + discoveryV1beta1 *discoveryv1beta1.DiscoveryV1beta1Client + discoveryV1 *discoveryv1.DiscoveryV1Client + networkingV1 *networkingv1.NetworkingV1Client +} + +// CoreV1 retrieves the CoreV1Client +func (c *Clientset) CoreV1() corev1.CoreV1Interface { + return c.coreV1 +} + +// DiscoveryV1beta1 retrieves the DiscoveryV1beta1Client +func (c *Clientset) DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface { + return c.discoveryV1beta1 +} + +// DiscoveryV1 retrieves the DiscoveryV1Client +func (c *Clientset) DiscoveryV1() discoveryv1.DiscoveryV1Interface { + return c.discoveryV1 +} + +// NetworkingV1 retrieves the NetworkingV1Client +func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { + return c.networkingV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. 
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.coreV1, err = corev1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.discoveryV1beta1, err = discoveryv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.discoveryV1, err = discoveryv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.networkingV1, err = networkingv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.coreV1 = corev1.New(c) + cs.discoveryV1beta1 = discoveryv1beta1.New(c) + cs.discoveryV1 = discoveryv1.New(c) + cs.networkingV1 = networkingv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..03fbe929fa --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. 
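The constructors above follow the standard client-gen pattern: NewForConfig shallow-copies the rest.Config, builds one shared HTTP client, and NewForConfigAndClient then wires every group client onto that transport, refusing a positive QPS without a positive Burst. A minimal construction sketch; the kubeconfig path is hypothetical and any other way of obtaining a *rest.Config works equally well:

package main

import (
	"fmt"

	slimclientset "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig location.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubeconfig")
	if err != nil {
		panic(err)
	}
	cfg.QPS = 20
	cfg.Burst = 40 // must be > 0 once QPS is set, per NewForConfigAndClient

	cs, err := slimclientset.NewForConfig(cfg) // all group clients share one transport
	if err != nil {
		panic(err)
	}
	fmt.Println(cs.CoreV1() != nil, cs.DiscoveryV1() != nil)
}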
+ +package fake + +import ( + clientset "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned" + corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1" + fakecorev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake" + discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1" + fakediscoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake" + discoveryv1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1" + fakediscoveryv1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake" + networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1" + fakenetworkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// CoreV1 retrieves the CoreV1Client +func (c *Clientset) CoreV1() corev1.CoreV1Interface { + return &fakecorev1.FakeCoreV1{Fake: &c.Fake} +} + +// DiscoveryV1beta1 retrieves the DiscoveryV1beta1Client +func (c *Clientset) DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface { + return &fakediscoveryv1beta1.FakeDiscoveryV1beta1{Fake: &c.Fake} +} + +// DiscoveryV1 retrieves the DiscoveryV1Client +func (c *Clientset) DiscoveryV1() discoveryv1.DiscoveryV1Interface { + return &fakediscoveryv1.FakeDiscoveryV1{Fake: &c.Fake} +} + +// NetworkingV1 retrieves the NetworkingV1Client +func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { + return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..1da8cc9d5b --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..8053061f47 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/register.go @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1" + discoveryv1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1" + networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + corev1.AddToScheme, + discoveryv1beta1.AddToScheme, + discoveryv1.AddToScheme, + networkingv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
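The fake clientset above is the usual client-gen testing shim: NewSimpleClientset seeds an object tracker, installs a default reactor that serves creates, gets, and lists straight from that tracker, and routes watches through it as well. A unit-test sketch, under the assumption that the slim types embed the slim ObjectMeta and that the import paths match this vendored tree:

package fake_test

import (
	"context"
	"testing"

	slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
	slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
	"github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestListNodes(t *testing.T) {
	// Seed the tracker with one pre-existing object; the default reactor serves it back.
	cs := fake.NewSimpleClientset(&slim_corev1.Node{
		ObjectMeta: slim_metav1.ObjectMeta{Name: "node-1"},
	})

	nodes, err := cs.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(nodes.Items) != 1 {
		t.Fatalf("expected 1 node, got %d", len(nodes.Items))
	}
}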
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..ba34515351 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..dd5b051f72 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/register.go @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1" + discoveryv1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1" + networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + corev1.AddToScheme, + discoveryv1beta1.AddToScheme, + discoveryv1.AddToScheme, + networkingv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/core_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/core_client.go new file mode 100644 index 0000000000..25eab19580 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/core_client.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type CoreV1Interface interface { + RESTClient() rest.Interface + EndpointsGetter + NamespacesGetter + NodesGetter + PodsGetter + SecretsGetter + ServicesGetter +} + +// CoreV1Client is used to interact with features provided by the group. +type CoreV1Client struct { + restClient rest.Interface +} + +func (c *CoreV1Client) Endpoints(namespace string) EndpointsInterface { + return newEndpoints(c, namespace) +} + +func (c *CoreV1Client) Namespaces() NamespaceInterface { + return newNamespaces(c) +} + +func (c *CoreV1Client) Nodes() NodeInterface { + return newNodes(c) +} + +func (c *CoreV1Client) Pods(namespace string) PodInterface { + return newPods(c, namespace) +} + +func (c *CoreV1Client) Secrets(namespace string) SecretInterface { + return newSecrets(c, namespace) +} + +func (c *CoreV1Client) Services(namespace string) ServiceInterface { + return newServices(c, namespace) +} + +// NewForConfig creates a new CoreV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*CoreV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CoreV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoreV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &CoreV1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoreV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoreV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreV1Client for the given RESTClient. 
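Because register.go funnels every slim API group into one Scheme and CodecFactory, the scheme package can decode raw manifests into slim types without the caller enumerating groups. A decoding sketch; it assumes the slim core/v1 Service kind is registered under the empty group, as the localSchemeBuilder above suggests:

package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
)

func main() {
	data := []byte(`{"apiVersion":"v1","kind":"Service","metadata":{"name":"demo"}}`)
	// UniversalDeserializer resolves the kind against the shared Scheme.
	obj, gvk, err := scheme.Codecs.UniversalDeserializer().Decode(data, nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s %T\n", gvk.Kind, obj) // Service, decoded into the slim core/v1 type
}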
+func New(c rest.Interface) *CoreV1Client { + return &CoreV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/api" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/doc.go new file mode 100644 index 0000000000..50cfbd485a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/endpoints.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/endpoints.go new file mode 100644 index 0000000000..9f363250eb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/endpoints.go @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// EndpointsGetter has a method to return a EndpointsInterface. +// A group's client should implement this interface. +type EndpointsGetter interface { + Endpoints(namespace string) EndpointsInterface +} + +// EndpointsInterface has methods to work with Endpoints resources. 
+type EndpointsInterface interface { + Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (*v1.Endpoints, error) + Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (*v1.Endpoints, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Endpoints, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.EndpointsList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) + EndpointsExpansion +} + +// endpoints implements EndpointsInterface +type endpoints struct { + client rest.Interface + ns string +} + +// newEndpoints returns a Endpoints +func newEndpoints(c *CoreV1Client, namespace string) *endpoints { + return &endpoints{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. +func (c *endpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Endpoints that match those selectors. +func (c *endpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.EndpointsList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpoints. +func (c *endpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(endpoints). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpoints"). + Name(endpoints.Name). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Body(endpoints). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. +func (c *endpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched endpoints. +func (c *endpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/doc.go new file mode 100644 index 0000000000..57bd090efb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_core_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 0000000000..60176f481b --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. 
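The typed endpoints client above is a thin wrapper over the REST request builder: each verb composes namespace, resource, name, versioned parameters, and an optional timeout, then decodes the response into the slim types. A caller-side sketch (config acquisition elided; see the constructor example earlier):

package main

import (
	"context"
	"fmt"

	slimclientset "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

// listEndpoints issues GET /api/v1/namespaces/default/endpoints and decodes
// the response into the slim EndpointsList.
func listEndpoints(cfg *rest.Config) error {
	cs, err := slimclientset.NewForConfig(cfg)
	if err != nil {
		return err
	}
	eps, err := cs.CoreV1().Endpoints("default").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println(len(eps.Items))
	return nil
}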
+ +package fake + +import ( + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeCoreV1 struct { + *testing.Fake +} + +func (c *FakeCoreV1) Endpoints(namespace string) v1.EndpointsInterface { + return &FakeEndpoints{c, namespace} +} + +func (c *FakeCoreV1) Namespaces() v1.NamespaceInterface { + return &FakeNamespaces{c} +} + +func (c *FakeCoreV1) Nodes() v1.NodeInterface { + return &FakeNodes{c} +} + +func (c *FakeCoreV1) Pods(namespace string) v1.PodInterface { + return &FakePods{c, namespace} +} + +func (c *FakeCoreV1) Secrets(namespace string) v1.SecretInterface { + return &FakeSecrets{c, namespace} +} + +func (c *FakeCoreV1) Services(namespace string) v1.ServiceInterface { + return &FakeServices{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCoreV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_endpoints.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_endpoints.go new file mode 100644 index 0000000000..9029e198c0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_endpoints.go @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeEndpoints implements EndpointsInterface +type FakeEndpoints struct { + Fake *FakeCoreV1 + ns string +} + +var endpointsResource = v1.SchemeGroupVersion.WithResource("endpoints") + +var endpointsKind = v1.SchemeGroupVersion.WithKind("Endpoints") + +// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. +func (c *FakeEndpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(endpointsResource, c.ns, name), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} + +// List takes label and field selectors, and returns the list of Endpoints that match those selectors. +func (c *FakeEndpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(endpointsResource, endpointsKind, c.ns, opts), &v1.EndpointsList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.EndpointsList{ListMeta: obj.(*v1.EndpointsList).ListMeta} + for _, item := range obj.(*v1.EndpointsList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested endpoints. 
+func (c *FakeEndpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(endpointsResource, c.ns, opts)) + +} + +// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *FakeEndpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} + +// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *FakeEndpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} + +// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. +func (c *FakeEndpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(endpointsResource, c.ns, name, opts), &v1.Endpoints{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeEndpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(endpointsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.EndpointsList{}) + return err +} + +// Patch applies the patch and returns the patched endpoints. +func (c *FakeEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, pt, data, subresources...), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_namespace.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_namespace.go new file mode 100644 index 0000000000..8115465835 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_namespace.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. 
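Every fake method above routes through testing.Fake.Invokes, which is what makes the fakes scriptable: a test can prepend its own reactor to shadow the default tracker-backed behaviour, for example to simulate apiserver failures. A sketch:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	cs := fake.NewSimpleClientset()
	// Reactors run in order; prepending lets the test override the object tracker.
	cs.PrependReactor("get", "namespaces", func(k8stesting.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("simulated apiserver failure")
	})
	_, err := cs.CoreV1().Namespaces().Get(context.Background(), "kube-system", metav1.GetOptions{})
	fmt.Println(err) // simulated apiserver failure
}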
+ +package fake + +import ( + "context" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeNamespaces implements NamespaceInterface +type FakeNamespaces struct { + Fake *FakeCoreV1 +} + +var namespacesResource = v1.SchemeGroupVersion.WithResource("namespaces") + +var namespacesKind = v1.SchemeGroupVersion.WithKind("Namespace") + +// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. +func (c *FakeNamespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(namespacesResource, name), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +// List takes label and field selectors, and returns the list of Namespaces that match those selectors. +func (c *FakeNamespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(namespacesResource, namespacesKind, opts), &v1.NamespaceList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.NamespaceList{ListMeta: obj.(*v1.NamespaceList).ListMeta} + for _, item := range obj.(*v1.NamespaceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested namespaces. +func (c *FakeNamespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(namespacesResource, opts)) +} + +// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *FakeNamespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *FakeNamespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +// Delete takes name of the namespace and deletes it. Returns an error if one occurs. +func (c *FakeNamespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(namespacesResource, name, opts), &v1.Namespace{}) + return err +} + +// Patch applies the patch and returns the patched namespace. 
+func (c *FakeNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, pt, data, subresources...), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_node.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_node.go new file mode 100644 index 0000000000..316a908426 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_node.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeNodes implements NodeInterface +type FakeNodes struct { + Fake *FakeCoreV1 +} + +var nodesResource = v1.SchemeGroupVersion.WithResource("nodes") + +var nodesKind = v1.SchemeGroupVersion.WithKind("Node") + +// Get takes name of the node, and returns the corresponding node object, and an error if there is any. +func (c *FakeNodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(nodesResource, name), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +// List takes label and field selectors, and returns the list of Nodes that match those selectors. +func (c *FakeNodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(nodesResource, nodesKind, opts), &v1.NodeList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.NodeList{ListMeta: obj.(*v1.NodeList).ListMeta} + for _, item := range obj.(*v1.NodeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *FakeNodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(nodesResource, opts)) +} + +// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. +func (c *FakeNodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(nodesResource, node), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. 
+func (c *FakeNodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(nodesResource, node), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeNodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +// Delete takes name of the node and deletes it. Returns an error if one occurs. +func (c *FakeNodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(nodesResource, name, opts), &v1.Node{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeNodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(nodesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1.NodeList{}) + return err +} + +// Patch applies the patch and returns the patched node. +func (c *FakeNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, pt, data, subresources...), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_pod.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_pod.go new file mode 100644 index 0000000000..dca3fe6f00 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_pod.go @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePods implements PodInterface +type FakePods struct { + Fake *FakeCoreV1 + ns string +} + +var podsResource = v1.SchemeGroupVersion.WithResource("pods") + +var podsKind = v1.SchemeGroupVersion.WithKind("Pod") + +// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. +func (c *FakePods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(podsResource, c.ns, name), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +// List takes label and field selectors, and returns the list of Pods that match those selectors. +func (c *FakePods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(podsResource, podsKind, c.ns, opts), &v1.PodList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.PodList{ListMeta: obj.(*v1.PodList).ListMeta} + for _, item := range obj.(*v1.PodList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *FakePods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(podsResource, c.ns, opts)) + +} + +// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *FakePods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *FakePods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +// Delete takes name of the pod and deletes it. Returns an error if one occurs. +func (c *FakePods) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(podsResource, c.ns, name, opts), &v1.Pod{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.PodList{}) + return err +} + +// Patch applies the patch and returns the patched pod. +func (c *FakePods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, pt, data, subresources...), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +// UpdateEphemeralContainers takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *FakePods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(podsResource, "ephemeralcontainers", c.ns, pod), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_secret.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_secret.go new file mode 100644 index 0000000000..964116565f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_secret.go @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeSecrets implements SecretInterface +type FakeSecrets struct { + Fake *FakeCoreV1 + ns string +} + +var secretsResource = v1.SchemeGroupVersion.WithResource("secrets") + +var secretsKind = v1.SchemeGroupVersion.WithKind("Secret") + +// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. +func (c *FakeSecrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(secretsResource, c.ns, name), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} + +// List takes label and field selectors, and returns the list of Secrets that match those selectors. +func (c *FakeSecrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(secretsResource, secretsKind, c.ns, opts), &v1.SecretList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.SecretList{ListMeta: obj.(*v1.SecretList).ListMeta} + for _, item := range obj.(*v1.SecretList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested secrets. +func (c *FakeSecrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(secretsResource, c.ns, opts)) + +} + +// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *FakeSecrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} + +// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *FakeSecrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} + +// Delete takes name of the secret and deletes it. Returns an error if one occurs. +func (c *FakeSecrets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(secretsResource, c.ns, name, opts), &v1.Secret{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeSecrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(secretsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.SecretList{}) + return err +} + +// Patch applies the patch and returns the patched secret. +func (c *FakeSecrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, pt, data, subresources...), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_service.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_service.go new file mode 100644 index 0000000000..a68cb60672 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_service.go @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeServices implements ServiceInterface +type FakeServices struct { + Fake *FakeCoreV1 + ns string +} + +var servicesResource = v1.SchemeGroupVersion.WithResource("services") + +var servicesKind = v1.SchemeGroupVersion.WithKind("Service") + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *FakeServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(servicesResource, c.ns, name), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *FakeServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &v1.ServiceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ServiceList{ListMeta: obj.(*v1.ServiceList).ListMeta} + for _, item := range obj.(*v1.ServiceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *FakeServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts)) + +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *FakeServices) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *FakeServices) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServices) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *FakeServices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(servicesResource, c.ns, name, opts), &v1.Service{}) + + return err +} + +// Patch applies the patch and returns the patched service. +func (c *FakeServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/generated_expansion.go new file mode 100644 index 0000000000..4245920bad --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/generated_expansion.go @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +type EndpointsExpansion interface{} + +type NamespaceExpansion interface{} + +type NodeExpansion interface{} + +type PodExpansion interface{} + +type SecretExpansion interface{} + +type ServiceExpansion interface{} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/namespace.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/namespace.go new file mode 100644 index 0000000000..7fea2416da --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/namespace.go @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// NamespacesGetter has a method to return a NamespaceInterface. +// A group's client should implement this interface. +type NamespacesGetter interface { + Namespaces() NamespaceInterface +} + +// NamespaceInterface has methods to work with Namespace resources. +type NamespaceInterface interface { + Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error) + Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.NamespaceList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) + NamespaceExpansion +} + +// namespaces implements NamespaceInterface +type namespaces struct { + client rest.Interface +} + +// newNamespaces returns a Namespaces +func newNamespaces(c *CoreV1Client) *namespaces { + return &namespaces{ + client: c.RESTClient(), + } +} + +// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. +func (c *namespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Get(). + Resource("namespaces"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Namespaces that match those selectors. +func (c *namespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.NamespaceList{} + err = c.client.Get(). + Resource("namespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested namespaces. 
+func (c *namespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("namespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Post(). + Resource("namespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(namespace). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(namespace). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the namespace and deletes it. Returns an error if one occurs. +func (c *namespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("namespaces"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched namespace. +func (c *namespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Patch(pt). + Resource("namespaces"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/node.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/node.go new file mode 100644 index 0000000000..6d2e916cff --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/node.go @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// NodesGetter has a method to return a NodeInterface. +// A group's client should implement this interface. +type NodesGetter interface { + Nodes() NodeInterface +} + +// NodeInterface has methods to work with Node resources. 
+type NodeInterface interface { + Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error) + Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Node, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.NodeList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) + NodeExpansion +} + +// nodes implements NodeInterface +type nodes struct { + client rest.Interface +} + +// newNodes returns a Nodes +func newNodes(c *CoreV1Client) *nodes { + return &nodes{ + client: c.RESTClient(), + } +} + +// Get takes name of the node, and returns the corresponding node object, and an error if there is any. +func (c *nodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Get(). + Resource("nodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Nodes that match those selectors. +func (c *nodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.NodeList{} + err = c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *nodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Post(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(node). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(node). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *nodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(node). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the node and deletes it. Returns an error if one occurs. +func (c *nodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("nodes"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *nodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("nodes"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched node. +func (c *nodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Patch(pt). + Resource("nodes"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/pod.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/pod.go new file mode 100644 index 0000000000..f46d4d7c3d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/pod.go @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PodsGetter has a method to return a PodInterface. +// A group's client should implement this interface. +type PodsGetter interface { + Pods(namespace string) PodInterface +} + +// PodInterface has methods to work with Pod resources. 
+type PodInterface interface { + Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (*v1.Pod, error) + Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Pod, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PodList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) + UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + + PodExpansion +} + +// pods implements PodInterface +type pods struct { + client rest.Interface + ns string +} + +// newPods returns a Pods +func newPods(c *CoreV1Client, namespace string) *pods { + return &pods{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. +func (c *pods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Pods that match those selectors. +func (c *pods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PodList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *pods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(pod). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(pod). + Do(ctx). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *pods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(pod). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the pod and deletes it. Returns an error if one occurs. +func (c *pods) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched pod. +func (c *pods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("pods"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// UpdateEphemeralContainers takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(podName). + SubResource("ephemeralcontainers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(pod). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/secret.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/secret.go new file mode 100644 index 0000000000..ccbf30c163 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/secret.go @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// SecretsGetter has a method to return a SecretInterface. +// A group's client should implement this interface. +type SecretsGetter interface { + Secrets(namespace string) SecretInterface +} + +// SecretInterface has methods to work with Secret resources. 
+type SecretInterface interface { + Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (*v1.Secret, error) + Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (*v1.Secret, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) + SecretExpansion +} + +// secrets implements SecretInterface +type secrets struct { + client rest.Interface + ns string +} + +// newSecrets returns a Secrets +func newSecrets(c *CoreV1Client, namespace string) *secrets { + return &secrets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. +func (c *secrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Secrets that match those selectors. +func (c *secrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.SecretList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested secrets. +func (c *secrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Post(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(secret). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Put(). + Namespace(c.ns). + Resource("secrets"). + Name(secret.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(secret). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the secret and deletes it. 
Returns an error if one occurs. +func (c *secrets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *secrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched secret. +func (c *secrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("secrets"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/service.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/service.go new file mode 100644 index 0000000000..b981e19796 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/service.go @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ServicesGetter has a method to return a ServiceInterface. +// A group's client should implement this interface. +type ServicesGetter interface { + Services(namespace string) ServiceInterface +} + +// ServiceInterface has methods to work with Service resources. 
+type ServiceInterface interface { + Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (*v1.Service, error) + Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) + UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) + ServiceExpansion +} + +// services implements ServiceInterface +type services struct { + client rest.Interface + ns string +} + +// newServices returns a Services +func newServices(c *CoreV1Client, namespace string) *services { + return &services{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *services) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *services) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *services) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Post(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(service). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(service). + Do(ctx). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *services) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(service). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *services) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched service. +func (c *services) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("services"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/discovery_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/discovery_client.go new file mode 100644 index 0000000000..ca9ef1187f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/discovery_client.go @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1" + "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type DiscoveryV1Interface interface { + RESTClient() rest.Interface + EndpointSlicesGetter +} + +// DiscoveryV1Client is used to interact with features provided by the discovery.k8s.io group. +type DiscoveryV1Client struct { + restClient rest.Interface +} + +func (c *DiscoveryV1Client) EndpointSlices(namespace string) EndpointSliceInterface { + return newEndpointSlices(c, namespace) +} + +// NewForConfig creates a new DiscoveryV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*DiscoveryV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new DiscoveryV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*DiscoveryV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &DiscoveryV1Client{client}, nil +} + +// NewForConfigOrDie creates a new DiscoveryV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *DiscoveryV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new DiscoveryV1Client for the given RESTClient. +func New(c rest.Interface) *DiscoveryV1Client { + return &DiscoveryV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *DiscoveryV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/doc.go new file mode 100644 index 0000000000..50cfbd485a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/endpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/endpointslice.go new file mode 100644 index 0000000000..cfb4ab528e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/endpointslice.go @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1" + scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// EndpointSlicesGetter has a method to return a EndpointSliceInterface. +// A group's client should implement this interface. +type EndpointSlicesGetter interface { + EndpointSlices(namespace string) EndpointSliceInterface +} + +// EndpointSliceInterface has methods to work with EndpointSlice resources. 
+type EndpointSliceInterface interface { + Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (*v1.EndpointSlice, error) + Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (*v1.EndpointSlice, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.EndpointSlice, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.EndpointSliceList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) + EndpointSliceExpansion +} + +// endpointSlices implements EndpointSliceInterface +type endpointSlices struct { + client rest.Interface + ns string +} + +// newEndpointSlices returns a EndpointSlices +func newEndpointSlices(c *DiscoveryV1Client, namespace string) *endpointSlices { + return &endpointSlices{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. +func (c *endpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) { + result = &v1.EndpointSlice{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. +func (c *endpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.EndpointSliceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpointSlices. +func (c *endpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) { + result = &v1.EndpointSlice{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(endpointSlice). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. 
+func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) { + result = &v1.EndpointSlice{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpointslices"). + Name(endpointSlice.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(endpointSlice). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. +func (c *endpointSlices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched endpointSlice. +func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) { + result = &v1.EndpointSlice{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/doc.go new file mode 100644 index 0000000000..57bd090efb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_discovery_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_discovery_client.go new file mode 100644 index 0000000000..0cee06b994 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_discovery_client.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeDiscoveryV1 struct { + *testing.Fake +} + +func (c *FakeDiscoveryV1) EndpointSlices(namespace string) v1.EndpointSliceInterface { + return &FakeEndpointSlices{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeDiscoveryV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_endpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_endpointslice.go new file mode 100644 index 0000000000..d04fdcf47c --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_endpointslice.go @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeEndpointSlices implements EndpointSliceInterface +type FakeEndpointSlices struct { + Fake *FakeDiscoveryV1 + ns string +} + +var endpointslicesResource = v1.SchemeGroupVersion.WithResource("endpointslices") + +var endpointslicesKind = v1.SchemeGroupVersion.WithKind("EndpointSlice") + +// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. +func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.EndpointSlice), err +} + +// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. +func (c *FakeEndpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1.EndpointSliceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.EndpointSliceList{ListMeta: obj.(*v1.EndpointSliceList).ListMeta} + for _, item := range obj.(*v1.EndpointSliceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested endpointSlices. +func (c *FakeEndpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts)) + +} + +// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.EndpointSlice), err +} + +// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. 
+func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.EndpointSlice), err +} + +// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. +func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(endpointslicesResource, c.ns, name, opts), &v1.EndpointSlice{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.EndpointSliceList{}) + return err +} + +// Patch applies the patch and returns the patched endpointSlice. +func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.EndpointSlice), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/generated_expansion.go new file mode 100644 index 0000000000..7295b601e6 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/generated_expansion.go @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type EndpointSliceExpansion interface{} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/discovery_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/discovery_client.go new file mode 100644 index 0000000000..d2d5565818 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/discovery_client.go @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "net/http" + + v1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1" + "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type DiscoveryV1beta1Interface interface { + RESTClient() rest.Interface + EndpointSlicesGetter +} + +// DiscoveryV1beta1Client is used to interact with features provided by the discovery.k8s.io group. 
+type DiscoveryV1beta1Client struct { + restClient rest.Interface +} + +func (c *DiscoveryV1beta1Client) EndpointSlices(namespace string) EndpointSliceInterface { + return newEndpointSlices(c, namespace) +} + +// NewForConfig creates a new DiscoveryV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*DiscoveryV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new DiscoveryV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*DiscoveryV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &DiscoveryV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new DiscoveryV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *DiscoveryV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new DiscoveryV1beta1Client for the given RESTClient. +func New(c rest.Interface) *DiscoveryV1beta1Client { + return &DiscoveryV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *DiscoveryV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/doc.go new file mode 100644 index 0000000000..07e23ed4aa --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/endpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/endpointslice.go new file mode 100644 index 0000000000..a77bbb97e0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/endpointslice.go @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1" + scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// EndpointSlicesGetter has a method to return a EndpointSliceInterface. +// A group's client should implement this interface. +type EndpointSlicesGetter interface { + EndpointSlices(namespace string) EndpointSliceInterface +} + +// EndpointSliceInterface has methods to work with EndpointSlice resources. +type EndpointSliceInterface interface { + Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (*v1beta1.EndpointSlice, error) + Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (*v1beta1.EndpointSlice, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.EndpointSlice, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EndpointSliceList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) + EndpointSliceExpansion +} + +// endpointSlices implements EndpointSliceInterface +type endpointSlices struct { + client rest.Interface + ns string +} + +// newEndpointSlices returns a EndpointSlices +func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSlices { + return &endpointSlices{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. +func (c *endpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { + result = &v1beta1.EndpointSlice{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. +func (c *endpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.EndpointSliceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpointSlices. +func (c *endpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a endpointSlice and creates it. 
Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { + result = &v1beta1.EndpointSlice{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(endpointSlice). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { + result = &v1beta1.EndpointSlice{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpointslices"). + Name(endpointSlice.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(endpointSlice). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. +func (c *endpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched endpointSlice. +func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { + result = &v1beta1.EndpointSlice{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/doc.go new file mode 100644 index 0000000000..57bd090efb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
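+//
+// A minimal test sketch (NewSimpleClientset comes from the parent versioned
+// fake clientset package of standard client-gen output; cs, ctx and slice
+// are assumed names, not definitions in this package):
+//
+//	cs := fake.NewSimpleClientset()
+//	created, err := cs.DiscoveryV1beta1().EndpointSlices("default").
+//		Create(ctx, slice, metav1.CreateOptions{})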
+package fake diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_discovery_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_discovery_client.go new file mode 100644 index 0000000000..a1d258d80c --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_discovery_client.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeDiscoveryV1beta1 struct { + *testing.Fake +} + +func (c *FakeDiscoveryV1beta1) EndpointSlices(namespace string) v1beta1.EndpointSliceInterface { + return &FakeEndpointSlices{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeDiscoveryV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_endpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_endpointslice.go new file mode 100644 index 0000000000..ddc55497d1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_endpointslice.go @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeEndpointSlices implements EndpointSliceInterface +type FakeEndpointSlices struct { + Fake *FakeDiscoveryV1beta1 + ns string +} + +var endpointslicesResource = v1beta1.SchemeGroupVersion.WithResource("endpointslices") + +var endpointslicesKind = v1beta1.SchemeGroupVersion.WithKind("EndpointSlice") + +// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. +func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1beta1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.EndpointSlice), err +} + +// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. +func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1beta1.EndpointSliceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.EndpointSliceList{ListMeta: obj.(*v1beta1.EndpointSliceList).ListMeta} + for _, item := range obj.(*v1beta1.EndpointSliceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested endpointSlices. +func (c *FakeEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts)) + +} + +// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.EndpointSlice), err +} + +// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.EndpointSlice), err +} + +// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. +func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(endpointslicesResource, c.ns, name, opts), &v1beta1.EndpointSlice{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.EndpointSliceList{}) + return err +} + +// Patch applies the patch and returns the patched endpointSlice. +func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1beta1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.EndpointSlice), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..1aa54bf9b1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/generated_expansion.go @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type EndpointSliceExpansion interface{} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/doc.go new file mode 100644 index 0000000000..50cfbd485a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/doc.go new file mode 100644 index 0000000000..57bd090efb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/doc.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networking_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networking_client.go new file mode 100644 index 0000000000..6e5f98fde0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networking_client.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeNetworkingV1 struct { + *testing.Fake +} + +func (c *FakeNetworkingV1) NetworkPolicies(namespace string) v1.NetworkPolicyInterface { + return &FakeNetworkPolicies{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
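+// The fake intentionally returns a typed nil *rest.RESTClient inside the
+// rest.Interface value: tests get a client that must never reach the wire,
+// matching the other generated fake clients in this patch.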
+func (c *FakeNetworkingV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networkpolicy.go new file mode 100644 index 0000000000..e51d9f3670 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networkpolicy.go @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeNetworkPolicies implements NetworkPolicyInterface +type FakeNetworkPolicies struct { + Fake *FakeNetworkingV1 + ns string +} + +var networkpoliciesResource = v1.SchemeGroupVersion.WithResource("networkpolicies") + +var networkpoliciesKind = v1.SchemeGroupVersion.WithKind("NetworkPolicy") + +// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. +func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &v1.NetworkPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.NetworkPolicy), err +} + +// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. +func (c *FakeNetworkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &v1.NetworkPolicyList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.NetworkPolicyList{ListMeta: obj.(*v1.NetworkPolicyList).ListMeta} + for _, item := range obj.(*v1.NetworkPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested networkPolicies. +func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts)) + +} + +// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.NetworkPolicy), err +} + +// Update takes the representation of a networkPolicy and updates it. 
Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.NetworkPolicy), err +} + +// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. +func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(networkpoliciesResource, c.ns, name, opts), &v1.NetworkPolicy{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.NetworkPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched networkPolicy. +func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &v1.NetworkPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.NetworkPolicy), err +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/generated_expansion.go new file mode 100644 index 0000000000..e78ec06091 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/generated_expansion.go @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type NetworkPolicyExpansion interface{} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networking_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networking_client.go new file mode 100644 index 0000000000..ab93e1dfa9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networking_client.go @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1" + "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type NetworkingV1Interface interface { + RESTClient() rest.Interface + NetworkPoliciesGetter +} + +// NetworkingV1Client is used to interact with features provided by the networking.k8s.io group. 
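+//
+// Illustrative use of the HTTP-client-aware constructor below (a sketch; cfg,
+// httpClient, ctx and the policy name "allow-dns" are assumed caller-side
+// values, not part of this package):
+//
+//	client, err := NewForConfigAndClient(cfg, httpClient)
+//	if err != nil {
+//		// handle the configuration error
+//	}
+//	np, err := client.NetworkPolicies("default").Get(ctx, "allow-dns", metav1.GetOptions{})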
+type NetworkingV1Client struct { + restClient rest.Interface +} + +func (c *NetworkingV1Client) NetworkPolicies(namespace string) NetworkPolicyInterface { + return newNetworkPolicies(c, namespace) +} + +// NewForConfig creates a new NetworkingV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*NetworkingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NetworkingV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NetworkingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &NetworkingV1Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkingV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NetworkingV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkingV1Client for the given RESTClient. +func New(c rest.Interface) *NetworkingV1Client { + return &NetworkingV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NetworkingV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networkpolicy.go new file mode 100644 index 0000000000..f657e00a31 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networkpolicy.go @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1" + scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. +// A group's client should implement this interface. +type NetworkPoliciesGetter interface { + NetworkPolicies(namespace string) NetworkPolicyInterface +} + +// NetworkPolicyInterface has methods to work with NetworkPolicy resources. 
+type NetworkPolicyInterface interface { + Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (*v1.NetworkPolicy, error) + Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NetworkPolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.NetworkPolicyList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) + NetworkPolicyExpansion +} + +// networkPolicies implements NetworkPolicyInterface +type networkPolicies struct { + client rest.Interface + ns string +} + +// newNetworkPolicies returns a NetworkPolicies +func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies { + return &networkPolicies{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. +func (c *networkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { + result = &v1.NetworkPolicy{} + err = c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. +func (c *networkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.NetworkPolicyList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested networkPolicies. +func (c *networkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) { + result = &v1.NetworkPolicy{} + err = c.client.Post(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(networkPolicy). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. 
+func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { + result = &v1.NetworkPolicy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(networkPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(networkPolicy). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. +func (c *networkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *networkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched networkPolicy. +func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) { + result = &v1.NetworkPolicy{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("networkpolicies"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/types/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/types/doc.go new file mode 100644 index 0000000000..7fc8fc41bb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/types/doc.go @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// +k8s:deepcopy-gen=package,register +// +deepequal-gen=package + +// Package types contains slimmer versions of k8s types. 
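+//
+// The +k8s:deepcopy-gen and +deepequal-gen package markers in this file,
+// together with the per-type markers in types.go, drive the generators that
+// emit the zz_generated.deepcopy.go and zz_generated.deepequal.go files for
+// this package.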
+// +groupName=pkg
+package types
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/types/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/types/types.go
new file mode 100644
index 0000000000..6f7b36c60e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/types/types.go
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"github.com/cilium/cilium/api/v1/models"
+	v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+	slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +deepequal-gen=true
+type SlimCNP struct {
+	*v2.CiliumNetworkPolicy
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +deepequal-gen:private-method=true
+type CiliumEndpoint struct {
+	// +deepequal-gen=false
+	slim_metav1.TypeMeta
+	// +deepequal-gen=false
+	slim_metav1.ObjectMeta
+	Identity   *v2.EndpointIdentity
+	Networking *v2.EndpointNetworking
+	Encryption *v2.EncryptionSpec
+	NamedPorts models.NamedPorts
+}
+
+type Configuration interface {
+	K8sAPIDiscoveryEnabled() bool
+}
+
+func (in *CiliumEndpoint) DeepEqual(other *CiliumEndpoint) bool {
+	if other == nil {
+		return false
+	}
+
+	if in.Name != other.Name {
+		return false
+	}
+	if in.Namespace != other.Namespace {
+		return false
+	}
+
+	return in.deepEqual(other)
+}
+
+// +deepequal-gen=true
+type IPSlice []string
+
+// UnserializableObject is a skeleton embeddable k8s object that implements
+// GetObjectKind() of runtime.Object. Useful with Resource[T]'s
+// WithTransform option when deriving from real objects.
+// The struct into which this is embedded will also need to implement
+// DeepCopyObject. This can be generated by including the deepcopy-gen comment
+// below in the parent object and running "make generate-k8s-api".
+//
+// +k8s:deepcopy-gen=false
+type UnserializableObject struct{}
+
+func (UnserializableObject) GetObjectKind() schema.ObjectKind {
+	// Not serializable, so return the empty kind.
+	return schema.EmptyObjectKind
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/types/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/types/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..d4a84efd29
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/types/zz_generated.deepcopy.go
@@ -0,0 +1,116 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package types
+
+import (
+	models "github.com/cilium/cilium/api/v1/models"
+	v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumEndpoint) DeepCopyInto(out *CiliumEndpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(v2.EndpointIdentity) + (*in).DeepCopyInto(*out) + } + if in.Networking != nil { + in, out := &in.Networking, &out.Networking + *out = new(v2.EndpointNetworking) + (*in).DeepCopyInto(*out) + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(v2.EncryptionSpec) + **out = **in + } + if in.NamedPorts != nil { + in, out := &in.NamedPorts, &out.NamedPorts + *out = make(models.NamedPorts, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(models.Port) + **out = **in + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEndpoint. +func (in *CiliumEndpoint) DeepCopy() *CiliumEndpoint { + if in == nil { + return nil + } + out := new(CiliumEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumEndpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in IPSlice) DeepCopyInto(out *IPSlice) { + { + in := &in + *out = make(IPSlice, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSlice. +func (in IPSlice) DeepCopy() IPSlice { + if in == nil { + return nil + } + out := new(IPSlice) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SlimCNP) DeepCopyInto(out *SlimCNP) { + *out = *in + if in.CiliumNetworkPolicy != nil { + in, out := &in.CiliumNetworkPolicy, &out.CiliumNetworkPolicy + *out = new(v2.CiliumNetworkPolicy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlimCNP. +func (in *SlimCNP) DeepCopy() *SlimCNP { + if in == nil { + return nil + } + out := new(SlimCNP) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SlimCNP) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/types/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/types/zz_generated.deepequal.go new file mode 100644 index 0000000000..ca6df549f2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/types/zz_generated.deepequal.go @@ -0,0 +1,98 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepequal-gen. DO NOT EDIT. + +package types + +// deepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumEndpoint) deepEqual(other *CiliumEndpoint) bool { + if other == nil { + return false + } + + if (in.Identity == nil) != (other.Identity == nil) { + return false + } else if in.Identity != nil { + if !in.Identity.DeepEqual(other.Identity) { + return false + } + } + + if (in.Networking == nil) != (other.Networking == nil) { + return false + } else if in.Networking != nil { + if !in.Networking.DeepEqual(other.Networking) { + return false + } + } + + if (in.Encryption == nil) != (other.Encryption == nil) { + return false + } else if in.Encryption != nil { + if !in.Encryption.DeepEqual(other.Encryption) { + return false + } + } + + if ((in.NamedPorts != nil) && (other.NamedPorts != nil)) || ((in.NamedPorts == nil) != (other.NamedPorts == nil)) { + in, other := &in.NamedPorts, &other.NamedPorts + if other == nil || !in.DeepEqual(other) { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *IPSlice) DeepEqual(other *IPSlice) bool { + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *SlimCNP) DeepEqual(other *SlimCNP) bool { + if other == nil { + return false + } + + if (in.CiliumNetworkPolicy == nil) != (other.CiliumNetworkPolicy == nil) { + return false + } else if in.CiliumNetworkPolicy != nil { + if !in.CiliumNetworkPolicy.DeepEqual(other.CiliumNetworkPolicy) { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *UnserializableObject) DeepEqual(other *UnserializableObject) bool { + if other == nil { + return false + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/version/version.go b/vendor/github.com/cilium/cilium/pkg/k8s/version/version.go new file mode 100644 index 0000000000..8d0e13cf82 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/version/version.go @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package version keeps track of the Kubernetes version the client is +// connected to +package version + +import ( + "context" + "fmt" + + "github.com/blang/semver/v4" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/versioncheck" +) + +var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "k8s") + +// ServerCapabilities is a list of server capabilities derived based on +// version, the Kubernetes discovery API, or probing of individual API +// endpoints. +type ServerCapabilities struct { + // MinimalVersionMet is true when the minimal version of Kubernetes + // required to run Cilium has been met + MinimalVersionMet bool + + // EndpointSlice is the ability of k8s server to support endpoint slices + EndpointSlice bool + + // EndpointSliceV1 is the ability of k8s server to support endpoint slices + // v1. This version was introduced in K8s v1.21.0. 
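+	//
+	// (Illustrative: callers would typically consult these flags via the
+	// package-level Capabilities() accessor defined below, e.g.
+	// version.Capabilities().EndpointSliceV1, rather than constructing a
+	// ServerCapabilities value directly.)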
+	EndpointSliceV1 bool
+
+	// LeasesResourceLock is the ability of K8s server to support Lease type
+	// from coordination.k8s.io/v1 API for leader election purposes (currently only in operator).
+	// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#lease-v1-coordination-k8s-io
+	//
+	// This capability was introduced in K8s version 1.14, prior to which
+	// we don't support HA mode for the cilium-operator.
+	LeasesResourceLock bool
+}
+
+type cachedVersion struct {
+	mutex        lock.RWMutex
+	capabilities ServerCapabilities
+	version      semver.Version
+}
+
+const (
+	// MinimalVersionConstraint is the minimal version that Cilium supports to
+	// run kubernetes.
+	MinimalVersionConstraint = "1.16.0"
+)
+
+var (
+	cached = cachedVersion{}
+
+	discoveryAPIGroupV1beta1 = "discovery.k8s.io/v1beta1"
+	discoveryAPIGroupV1      = "discovery.k8s.io/v1"
+	coordinationV1APIGroup   = "coordination.k8s.io/v1"
+	endpointSliceKind        = "EndpointSlice"
+	leaseKind                = "Lease"
+
+	// Constraint to check support for discovery/v1 types. Support for v1
+	// discovery was introduced in K8s version 1.21.
+	isGEThanAPIDiscoveryV1 = versioncheck.MustCompile(">=1.21.0")
+
+	// Constraint to check support for discovery/v1beta1 types. Support for
+	// v1beta1 discovery was introduced in K8s version 1.17.
+	isGEThanAPIDiscoveryV1Beta1 = versioncheck.MustCompile(">=1.17.0")
+
+	// isGEThanMinimalVersionConstraint is the minimal version required to run
+	// Cilium
+	isGEThanMinimalVersionConstraint = versioncheck.MustCompile(">=" + MinimalVersionConstraint)
+)
+
+// Version returns the version of the Kubernetes apiserver
+func Version() semver.Version {
+	cached.mutex.RLock()
+	c := cached.version
+	cached.mutex.RUnlock()
+	return c
+}
+
+// Capabilities returns the capabilities of the Kubernetes apiserver
+func Capabilities() ServerCapabilities {
+	cached.mutex.RLock()
+	c := cached.capabilities
+	cached.mutex.RUnlock()
+	return c
+}
+
+func DisableLeasesResourceLock() {
+	cached.mutex.Lock()
+	defer cached.mutex.Unlock()
+	cached.capabilities.LeasesResourceLock = false
+}
+
+func updateVersion(version semver.Version) {
+	cached.mutex.Lock()
+	defer cached.mutex.Unlock()
+
+	cached.version = version
+
+	cached.capabilities.MinimalVersionMet = isGEThanMinimalVersionConstraint(version)
+	cached.capabilities.EndpointSliceV1 = isGEThanAPIDiscoveryV1(version)
+	cached.capabilities.EndpointSlice = isGEThanAPIDiscoveryV1Beta1(version)
+}
+
+func updateServerGroupsAndResources(apiResourceLists []*metav1.APIResourceList) {
+	cached.mutex.Lock()
+	defer cached.mutex.Unlock()
+
+	cached.capabilities.EndpointSlice = false
+	cached.capabilities.EndpointSliceV1 = false
+	cached.capabilities.LeasesResourceLock = false
+	for _, rscList := range apiResourceLists {
+		if rscList.GroupVersion == discoveryAPIGroupV1beta1 {
+			for _, rsc := range rscList.APIResources {
+				if rsc.Kind == endpointSliceKind {
+					cached.capabilities.EndpointSlice = true
+					break
+				}
+			}
+		}
+		if rscList.GroupVersion == discoveryAPIGroupV1 {
+			for _, rsc := range rscList.APIResources {
+				if rsc.Kind == endpointSliceKind {
+					cached.capabilities.EndpointSlice = true
+					cached.capabilities.EndpointSliceV1 = true
+					break
+				}
+			}
+		}
+
+		if rscList.GroupVersion == coordinationV1APIGroup {
+			for _, rsc := range rscList.APIResources {
+				if rsc.Kind == leaseKind {
+					cached.capabilities.LeasesResourceLock = true
+					break
+				}
+			}
+		}
+	}
+}
+
+// Force forces the use of a specific version
+func Force(version string) error {
+	ver, err := versioncheck.Version(version)
+	if err != nil {
+		return err
+	}
+	updateVersion(ver)
+	return nil
+}
+
+func endpointSlicesFallbackDiscovery(client kubernetes.Interface) error {
+	// If a k8s version with discovery v1 is used, then do not even bother
+	// checking for v1beta1
+	cached.mutex.Lock()
+	if cached.capabilities.EndpointSliceV1 {
+		cached.capabilities.EndpointSlice = true
+		cached.mutex.Unlock()
+		return nil
+	}
+	cached.mutex.Unlock()
+
+	// Discovery of API groups requires the API services of the apiserver to be
+	// healthy. Such API services can depend on the readiness of regular pods
+	// which require Cilium to function correctly. By treating failure to
+	// discover API groups as fatal, a critical loop can be entered in which
+	// Cilium cannot start because the API groups can't be discovered.
+	//
+	// Here we acknowledge the lack of discovery ability as non-fatal and fall back to probing
+	// the API directly.
+	_, err := client.DiscoveryV1beta1().EndpointSlices("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{})
+	if err == nil {
+		cached.mutex.Lock()
+		cached.capabilities.EndpointSlice = true
+		cached.mutex.Unlock()
+		return nil
+	}
+
+	if errors.IsNotFound(err) {
+		log.WithError(err).Info("Unable to retrieve EndpointSlices for default/kubernetes. Disabling EndpointSlices")
+		// StatusNotFound is a safe error, EndpointSlices are
+		// disabled and the agent can continue.
+		return nil
+	}
+
+	// Unknown error, we can't derive whether to enable or disable
+	// EndpointSlices and need to error out.
+	return fmt.Errorf("unable to validate EndpointSlices support: %s", err)
+}
+
+func leasesFallbackDiscovery(client kubernetes.Interface, apiDiscoveryEnabled bool) error {
+	// apiDiscoveryEnabled is used to fall back to directly probing the API
+	// for Leases support when we cannot discover API groups.
+	// We only need to check for Leases capabilities in the operator, which uses Leases
+	// for leader election purposes in HA mode.
+	if !apiDiscoveryEnabled {
+		log.Debugf("Skipping Leases support fallback discovery")
+		return nil
+	}
+
+	// Similar to endpointSlicesFallbackDiscovery, here we fall back to probing the Kubernetes
+	// API directly. `kube-controller-manager` creates a lease in the kube-system namespace
+	// and here we check whether that Lease exists.
+	_, err := client.CoordinationV1().Leases("kube-system").Get(context.TODO(), "kube-controller-manager", metav1.GetOptions{})
+	if err == nil {
+		cached.mutex.Lock()
+		cached.capabilities.LeasesResourceLock = true
+		cached.mutex.Unlock()
+		return nil
+	}
+
+	if errors.IsNotFound(err) {
+		log.WithError(err).Info("Unable to retrieve Leases for kube-controller-manager. Disabling LeasesResourceLock")
+		// StatusNotFound is a safe error, Leases are
+		// disabled and the agent can continue
+		return nil
+	}
+
+	// Unknown error, we can't derive whether to enable or disable
+	// LeasesResourceLock and need to error out
+	return fmt.Errorf("unable to validate LeasesResourceLock support: %s", err)
+}
+
+func updateK8sServerVersion(client kubernetes.Interface) error {
+	var ver semver.Version
+
+	sv, err := client.Discovery().ServerVersion()
+	if err != nil {
+		return err
+	}
+
+	// Try GitVersion first. In case of error, fall back to MajorMinor.
+	if sv.GitVersion != "" {
+		// This is a string like "v1.9.0"
+		ver, err = versioncheck.Version(sv.GitVersion)
+		if err == nil {
+			updateVersion(ver)
+			return nil
+		}
+	}
+
+	if sv.Major != "" && sv.Minor != "" {
+		ver, err = versioncheck.Version(fmt.Sprintf("%s.%s", sv.Major, sv.Minor))
+		if err == nil {
+			updateVersion(ver)
+			return nil
+		}
+	}
+
+	return fmt.Errorf("cannot parse k8s server version from %+v: %s", sv, err)
+}
+
+// Update retrieves the version of the Kubernetes apiserver and derives the
+// capabilities. This function must be called after connectivity to the
+// apiserver has been established.
+//
+// Discovery of capabilities only works if the discovery API of the apiserver
+// is functional. If it is not available, a warning is logged and the discovery
+// falls back to probing individual API endpoints.
+func Update(client kubernetes.Interface, apiDiscoveryEnabled bool) error {
+	err := updateK8sServerVersion(client)
+	if err != nil {
+		return err
+	}
+
+	if apiDiscoveryEnabled {
+		// Discovery of API groups requires the API services of the
+		// apiserver to be healthy. Such API services can depend on the
+		// readiness of regular pods which require Cilium to function
+		// correctly. By treating failure to discover API groups as
+		// fatal, a critical loop can be entered in which Cilium cannot
+		// start because the API groups can't be discovered and the API
+		// groups will only become discoverable once Cilium is up.
+		_, apiResourceLists, err := client.Discovery().ServerGroupsAndResources()
+		if err != nil {
+			// It doesn't make sense to retry the retrieval of this
+			// information at a later point because the capabilities are
+			// primarily used while the agent is starting up. Instead, fall
+			// back to probing API endpoints directly.
+			log.WithError(err).Warning("Unable to discover API groups and resources")
+			if err := endpointSlicesFallbackDiscovery(client); err != nil {
+				return err
+			}
+
+			return leasesFallbackDiscovery(client, apiDiscoveryEnabled)
+		}
+
+		updateServerGroupsAndResources(apiResourceLists)
+	} else {
+		if err := endpointSlicesFallbackDiscovery(client); err != nil {
+			return err
+		}
+
+		return leasesFallbackDiscovery(client, apiDiscoveryEnabled)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/watchers/resources/resources.go b/vendor/github.com/cilium/cilium/pkg/k8s/watchers/resources/resources.go
new file mode 100644
index 0000000000..95e4b1ca4a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/watchers/resources/resources.go
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// This package contains exported resource identifiers and metric resource labels related to
+// K8s watchers.
+package resources
+
+const (
+	// K8sAPIGroupServiceV1Core is the identifier for K8s resources of type core/v1/Service.
+	K8sAPIGroupServiceV1Core = "core/v1::Service"
+	// K8sAPIGroupPodV1Core is the identifier for K8s resources of type core/v1/Pod.
+	K8sAPIGroupPodV1Core = "core/v1::Pods"
+	// K8sAPIGroupSecretV1Core is the identifier for K8s resources of type core/v1/Secret.
+	K8sAPIGroupSecretV1Core = "core/v1::Secrets"
+	// K8sAPIGroupEndpointSliceOrEndpoint is the combined identifier for K8s EndpointSlice and
+	// Endpoint resources.
+	K8sAPIGroupEndpointSliceOrEndpoint = "EndpointSliceOrEndpoint"
+
+	// MetricCNP is the scope label for CiliumNetworkPolicy event metrics.
+	MetricCNP = "CiliumNetworkPolicy"
+	// MetricCCNP is the scope label for CiliumClusterwideNetworkPolicy event metrics.
+	MetricCCNP = "CiliumClusterwideNetworkPolicy"
+	// MetricCCG is the scope label for CiliumCIDRGroup event metrics.
+	MetricCCG = "CiliumCIDRGroup"
+	// MetricService is the scope label for Kubernetes Service event metrics.
+	MetricService = "Service"
+	// MetricEndpoint is the scope label for Kubernetes Endpoint event metrics.
+	MetricEndpoint = "Endpoint"
+	// MetricEndpointSlice is the scope label for Kubernetes EndpointSlice event metrics.
+	MetricEndpointSlice = "EndpointSlice"
+
+	// MetricCreate is the label for watcher metrics related to create events.
+	MetricCreate = "create"
+	// MetricUpdate is the label for watcher metrics related to update events.
+	MetricUpdate = "update"
+	// MetricDelete is the label for watcher metrics related to delete events.
+	MetricDelete = "delete"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..d9d4856bcb
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/zz_generated.deepcopy.go
@@ -0,0 +1,187 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package k8s
+
+import (
+	net "net"
+
+	cidr "github.com/cilium/cilium/pkg/cidr"
+	loadbalancer "github.com/cilium/cilium/pkg/loadbalancer"
+	store "github.com/cilium/cilium/pkg/service/store"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Backend) DeepCopyInto(out *Backend) {
+	*out = *in
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make(store.PortConfiguration, len(*in))
+		for key, val := range *in {
+			var outVal *loadbalancer.L4Addr
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = new(loadbalancer.L4Addr)
+				**out = **in
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.HintsForZones != nil {
+		in, out := &in.HintsForZones, &out.HintsForZones
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backend.
+func (in *Backend) DeepCopy() *Backend {
+	if in == nil {
+		return nil
+	}
+	out := new(Backend)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Endpoints) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
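+// Every reference field below is re-allocated during the copy (including
+// net.IP values, which are byte slices), so the copy shares no mutable
+// state with the receiver.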
+func (in *Service) DeepCopyInto(out *Service) { + *out = *in + if in.FrontendIPs != nil { + in, out := &in.FrontendIPs, &out.FrontendIPs + *out = make([]net.IP, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(net.IP, len(*in)) + copy(*out, *in) + } + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make(map[loadbalancer.FEPortName]*loadbalancer.L4Addr, len(*in)) + for key, val := range *in { + var outVal *loadbalancer.L4Addr + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(loadbalancer.L4Addr) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NodePorts != nil { + in, out := &in.NodePorts, &out.NodePorts + *out = make(map[loadbalancer.FEPortName]NodePortToFrontend, len(*in)) + for key, val := range *in { + var outVal map[string]*loadbalancer.L3n4AddrID + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(NodePortToFrontend, len(*in)) + for key, val := range *in { + var outVal *loadbalancer.L3n4AddrID + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(loadbalancer.L3n4AddrID) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + (*out)[key] = outVal + } + } + if in.K8sExternalIPs != nil { + in, out := &in.K8sExternalIPs, &out.K8sExternalIPs + *out = make(map[string]net.IP, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(net.IP, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.LoadBalancerIPs != nil { + in, out := &in.LoadBalancerIPs, &out.LoadBalancerIPs + *out = make(map[string]net.IP, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(net.IP, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make(map[string]*cidr.CIDR, len(*in)) + for key, val := range *in { + var outVal *cidr.CIDR + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = (*in).DeepCopy() + } + (*out)[key] = outVal + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/zz_generated.deepequal.go new file mode 100644 index 0000000000..be6e9d52ef --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/zz_generated.deepequal.go @@ -0,0 +1,297 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepequal-gen. DO NOT EDIT. + +package k8s + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *Backend) DeepEqual(other *Backend) bool { + if other == nil { + return false + } + + if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) { + in, other := &in.Ports, &other.Ports + if other == nil || !in.DeepEqual(other) { + return false + } + } + + if in.NodeName != other.NodeName { + return false + } + if in.Terminating != other.Terminating { + return false + } + if ((in.HintsForZones != nil) && (other.HintsForZones != nil)) || ((in.HintsForZones == nil) != (other.HintsForZones == nil)) { + in, other := &in.HintsForZones, &other.HintsForZones + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + if in.Preferred != other.Preferred { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *EndpointSlices) DeepEqual(other *EndpointSlices) bool { + if other == nil { + return false + } + + if ((in.epSlices != nil) && (other.epSlices != nil)) || ((in.epSlices == nil) != (other.epSlices == nil)) { + in, other := &in.epSlices, &other.epSlices + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if !inValue.DeepEqual(otherValue) { + return false + } + } + } + } + } + + return true +} + +// deepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *Endpoints) deepEqual(other *Endpoints) bool { + if other == nil { + return false + } + + if in.UnserializableObject != other.UnserializableObject { + return false + } + + if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) { + return false + } + + if in.EndpointSliceID != other.EndpointSliceID { + return false + } + + if ((in.Backends != nil) && (other.Backends != nil)) || ((in.Backends == nil) != (other.Backends == nil)) { + in, other := &in.Backends, &other.Backends + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if !inValue.DeepEqual(otherValue) { + return false + } + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *NodePortToFrontend) DeepEqual(other *NodePortToFrontend) bool { + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if !inValue.DeepEqual(otherValue) { + return false + } + } + } + } + + return true +} + +// deepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
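+// The recurring ((a != nil) && (b != nil)) || ((a == nil) != (b == nil))
+// guard below skips the element-wise comparison only when both maps are nil;
+// in every other case the fields are compared by length and then per key.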
+func (in *Service) deepEqual(other *Service) bool { + if other == nil { + return false + } + + if in.IsHeadless != other.IsHeadless { + return false + } + if in.IncludeExternal != other.IncludeExternal { + return false + } + if in.Shared != other.Shared { + return false + } + if in.ServiceAffinity != other.ServiceAffinity { + return false + } + if in.ExtTrafficPolicy != other.ExtTrafficPolicy { + return false + } + if in.IntTrafficPolicy != other.IntTrafficPolicy { + return false + } + if in.HealthCheckNodePort != other.HealthCheckNodePort { + return false + } + if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) { + in, other := &in.Ports, &other.Ports + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if !inValue.DeepEqual(otherValue) { + return false + } + } + } + } + } + + if ((in.NodePorts != nil) && (other.NodePorts != nil)) || ((in.NodePorts == nil) != (other.NodePorts == nil)) { + in, other := &in.NodePorts, &other.NodePorts + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if !inValue.DeepEqual(&otherValue) { + return false + } + } + } + } + } + + if ((in.LoadBalancerSourceRanges != nil) && (other.LoadBalancerSourceRanges != nil)) || ((in.LoadBalancerSourceRanges == nil) != (other.LoadBalancerSourceRanges == nil)) { + in, other := &in.LoadBalancerSourceRanges, &other.LoadBalancerSourceRanges + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if !inValue.DeepEqual(otherValue) { + return false + } + } + } + } + } + + if ((in.Labels != nil) && (other.Labels != nil)) || ((in.Labels == nil) != (other.Labels == nil)) { + in, other := &in.Labels, &other.Labels + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if inValue != otherValue { + return false + } + } + } + } + } + + if ((in.Selector != nil) && (other.Selector != nil)) || ((in.Selector == nil) != (other.Selector == nil)) { + in, other := &in.Selector, &other.Selector + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if inValue != otherValue { + return false + } + } + } + } + } + + if in.SessionAffinity != other.SessionAffinity { + return false + } + if in.SessionAffinityTimeoutSec != other.SessionAffinityTimeoutSec { + return false + } + if in.TopologyAware != other.TopologyAware { + return false + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/allocator.go b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/allocator.go new file mode 100644 index 0000000000..4345273bf2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/allocator.go @@ -0,0 +1,637 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package allocator + +import ( + "context" + "fmt" + "path" + "strconv" + 
"strings" + + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/allocator" + "github.com/cilium/cilium/pkg/idpool" + "github.com/cilium/cilium/pkg/kvstore" + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/rate" +) + +var ( + log = logging.DefaultLogger.WithField(logfields.LogSubsys, "kvstorebackend") +) + +// kvstoreBackend is an implementaton of pkg/allocator.Backend. It store +// identities in the following format: +// +// Slave keys: +// +// Slave keys are owned by individual nodes: +// - basePath/value/key1/node1 => 1001 +// - basePath/value/key1/node2 => 1001 +// - basePath/value/key2/node1 => 1002 +// - basePath/value/key2/node2 => 1002 +// +// If at least one key exists with the prefix basePath/value/keyN then that +// key must be considered to be in use in the allocation space. +// +// Slave keys are protected by a lease and will automatically get removed +// after ~ option.Config.KVstoreLeaseTTL if the node does not renew in time. +// +// Master key: +// - basePath/id/1001 => key1 +// - basePath/id/1002 => key2 +// +// Master keys provide the mapping from ID to key. As long as a master key +// for an ID exists, the ID is still in use. However, if a master key is no +// longer backed by at least one slave key, the garbage collector will +// eventually release the master key and return it back to the pool. +type kvstoreBackend struct { + // lockless is true if allocation can be done lockless. This depends on + // the underlying kvstore backend + lockless bool + + // basePrefix is the prefix in the kvstore that all keys share which + // are being managed by this allocator. The basePrefix typically + // consists of something like: "space/project/allocatorName" + basePrefix string + + // idPrefix is the kvstore key prefix for all master keys. It is being + // derived from the basePrefix. + idPrefix string + + // valuePrefix is the kvstore key prefix for all slave keys. It is + // being derived from the basePrefix. + valuePrefix string + + // lockPrefix is the prefix to use for all kvstore locks. This prefix + // is different from the idPrefix and valuePrefix to simplify watching + // for ID and key changes. + lockPrefix string + + // suffix is the suffix attached to keys which must be node specific, + // this is typical set to the node's IP address + suffix string + + backend kvstore.BackendOperations + + keyType allocator.AllocatorKey +} + +func locklessCapability(backend kvstore.BackendOperations) bool { + required := kvstore.CapabilityCreateIfExists | kvstore.CapabilityDeleteOnZeroCount + return backend.GetCapabilities()&required == required +} + +func prefixMatchesKey(prefix, key string) bool { + // cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60 + lastSlash := strings.LastIndex(key, "/") + return len(prefix) == lastSlash +} + +// NewKVStoreBackend creates a pkg/allocator.Backend compatible instance. The +// specific kvstore used is configured in pkg/kvstore. 
+func NewKVStoreBackend(basePath, suffix string, typ allocator.AllocatorKey, backend kvstore.BackendOperations) (*kvstoreBackend, error) {
+	if backend == nil {
+		return nil, fmt.Errorf("kvstore client not configured")
+	}
+
+	return &kvstoreBackend{
+		basePrefix:  basePath,
+		idPrefix:    path.Join(basePath, "id"),
+		valuePrefix: path.Join(basePath, "value"),
+		lockPrefix:  path.Join(basePath, "locks"),
+		suffix:      suffix,
+		keyType:     typ,
+		lockless:    locklessCapability(backend),
+		backend:     backend,
+	}, nil
+}
+
+// lockPath locks a key in the scope of an allocator
+func (k *kvstoreBackend) lockPath(ctx context.Context, key string) (*kvstore.Lock, error) {
+	suffix := strings.TrimPrefix(key, k.basePrefix)
+	return kvstore.LockPath(ctx, k.backend, path.Join(k.lockPrefix, suffix))
+}
+
+// DeleteAllKeys will delete all keys
+func (k *kvstoreBackend) DeleteAllKeys(ctx context.Context) {
+	k.backend.DeletePrefix(ctx, k.basePrefix)
+}
+
+// AllocateID allocates a key->ID mapping in the kvstore.
+func (k *kvstoreBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (allocator.AllocatorKey, error) {
+	// create /id/<ID> and fail if it already exists
+	keyPath := path.Join(k.idPrefix, id.String())
+	keyEncoded := []byte(k.backend.Encode([]byte(key.GetKey())))
+	success, err := k.backend.CreateOnly(ctx, keyPath, keyEncoded, false)
+	if err != nil || !success {
+		return nil, fmt.Errorf("unable to create master key '%s': %s", keyPath, err)
+	}
+
+	return key, nil
+}
+
+// AllocateIDIfLocked allocates a key->ID mapping in the kvstore, if the
+// client is still holding the given lock.
+func (k *kvstoreBackend) AllocateIDIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) (allocator.AllocatorKey, error) {
+	// create /id/<ID> and fail if it already exists
+	keyPath := path.Join(k.idPrefix, id.String())
+	keyEncoded := []byte(k.backend.Encode([]byte(key.GetKey())))
+	success, err := k.backend.CreateOnlyIfLocked(ctx, keyPath, keyEncoded, false, lock)
+	if err != nil || !success {
+		return nil, fmt.Errorf("unable to create master key '%s': %s", keyPath, err)
+	}
+
+	return key, nil
+}
+
+// AcquireReference marks that this node is using this key->ID mapping in the kvstore.
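+//
+// For example (values are illustrative): with suffix "10.0.0.1" and an
+// encoded key "label;foo;", this creates or refreshes the slave key
+//
+//	basePath/value/label;foo;/10.0.0.1 => <id>
+//
+// under the lease, so the reference disappears automatically if this node
+// stops renewing it.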
+func (k *kvstoreBackend) AcquireReference(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error {
+	keyString := k.backend.Encode([]byte(key.GetKey()))
+	if err := k.createValueNodeKey(ctx, keyString, id, lock); err != nil {
+		return fmt.Errorf("unable to create slave key '%s': %s", keyString, err)
+	}
+	return nil
+}
+
+// createValueNodeKey records that this "node" is using this key->ID mapping
+func (k *kvstoreBackend) createValueNodeKey(ctx context.Context, key string, newID idpool.ID, lock kvstore.KVLocker) error {
+	// add a new key /value/<key>/<node> to account for the reference
+	// The key is protected with a TTL/lease and will expire after LeaseTTL
+	valueKey := path.Join(k.valuePrefix, key, k.suffix)
+	if _, err := k.backend.UpdateIfDifferentIfLocked(ctx, valueKey, []byte(newID.String()), true, lock); err != nil {
+		return fmt.Errorf("unable to create value-node key '%s': %s", valueKey, err)
+	}
+
+	return nil
+}
+
+// lock locks a key in the scope of an allocator
+func (k *kvstoreBackend) lock(ctx context.Context, key string) (*kvstore.Lock, error) {
+	suffix := strings.TrimPrefix(key, k.basePrefix)
+	return kvstore.LockPath(ctx, k.backend, path.Join(k.lockPrefix, suffix))
+}
+
+// Lock locks a key in the scope of an allocator
+func (k *kvstoreBackend) Lock(ctx context.Context, key allocator.AllocatorKey) (kvstore.KVLocker, error) {
+	return k.lock(ctx, k.backend.Encode([]byte(key.GetKey())))
+}
+
+// Get returns the ID which is allocated to a key in the kvstore
+func (k *kvstoreBackend) Get(ctx context.Context, key allocator.AllocatorKey) (idpool.ID, error) {
+	// ListPrefix() will return all keys matching the prefix; the prefix
+	// can cover multiple different keys, example:
+	//
+	// key1 := label1;label2;
+	// key2 := label1;label2;label3;
+	//
+	// In order to retrieve the correct key, the position of the last '/'
+	// is significant, e.g.
+	//
+	// prefix := cilium/state/identities/v1/value/label;foo;
+	//
+	// key1 := cilium/state/identities/v1/value/label;foo;/172.0.124.60
+	// key2 := cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60
+	//
+	// Only key1 should match
+	prefix := path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())))
+	pairs, err := k.backend.ListPrefix(ctx, prefix)
+	kvstore.Trace("ListPrefix", err, logrus.Fields{fieldPrefix: prefix, "entries": len(pairs)})
+	if err != nil {
+		return 0, err
+	}
+
+	for k, v := range pairs {
+		if prefixMatchesKey(prefix, k) {
+			id, err := strconv.ParseUint(string(v.Data), 10, 64)
+			if err == nil {
+				return idpool.ID(id), nil
+			}
+		}
+	}
+
+	return idpool.NoID, nil
+}
+
+// GetIfLocked returns the ID which is allocated to a key in the kvstore
+// if the client is still holding the given lock.
+func (k *kvstoreBackend) GetIfLocked(ctx context.Context, key allocator.AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error) {
+	// ListPrefixIfLocked() will return all keys matching the prefix; the prefix
+	// can cover multiple different keys, example:
+	//
+	// key1 := label1;label2;
+	// key2 := label1;label2;label3;
+	//
+	// In order to retrieve the correct key, the position of the last '/'
+	// is significant, e.g.
+ // + // prefix := cilium/state/identities/v1/value/label;foo; + // + // key1 := cilium/state/identities/v1/value/label;foo;/172.0.124.60 + // key2 := cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60 + // + // Only key1 should match + prefix := path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey()))) + pairs, err := k.backend.ListPrefixIfLocked(ctx, prefix, lock) + kvstore.Trace("ListPrefixLocked", err, logrus.Fields{fieldPrefix: prefix, "entries": len(pairs)}) + if err != nil { + return 0, err + } + + for k, v := range pairs { + if prefixMatchesKey(prefix, k) { + id, err := strconv.ParseUint(string(v.Data), 10, 64) + if err == nil { + return idpool.ID(id), nil + } + } + } + + return idpool.NoID, nil +} + +// GetByID returns the key associated with an ID. Returns nil if no key is +// associated with the ID. +func (k *kvstoreBackend) GetByID(ctx context.Context, id idpool.ID) (allocator.AllocatorKey, error) { + v, err := k.backend.Get(ctx, path.Join(k.idPrefix, id.String())) + if err != nil { + return nil, err + } + + if v == nil { + return nil, nil + } + + s, err := k.backend.Decode(string(v)) + if err != nil { + return nil, err + } + + return k.keyType.PutKey(string(s)), nil +} + +// UpdateKey refreshes the record that this node is using this key -> id +// mapping. When reliablyMissing is set it will also recreate missing master or +// slave keys. +func (k *kvstoreBackend) UpdateKey(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool) error { + var ( + err error + recreated bool + keyPath = path.Join(k.idPrefix, id.String()) + keyEncoded = []byte(k.backend.Encode([]byte(key.GetKey()))) + valueKey = path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())), k.suffix) + ) + + // Use of CreateOnly() ensures that any existing potentially + // conflicting key is never overwritten. + success, err := k.backend.CreateOnly(ctx, keyPath, keyEncoded, false) + switch { + case err != nil: + return fmt.Errorf("Unable to re-create missing master key \"%s\" -> \"%s\": %s", fieldKey, valueKey, err) + case success: + log.WithField(fieldKey, keyPath).Warning("Re-created missing master key") + } + + // Also re-create the slave key in case it has been deleted. This will + // ensure that the next garbage collection cycle of any participating + // node does not remove the master key again. + if reliablyMissing { + recreated, err = k.backend.CreateOnly(ctx, valueKey, []byte(id.String()), true) + } else { + recreated, err = k.backend.UpdateIfDifferent(ctx, valueKey, []byte(id.String()), true) + } + switch { + case err != nil: + return fmt.Errorf("Unable to re-create missing slave key \"%s\" -> \"%s\": %s", fieldKey, valueKey, err) + case recreated: + log.WithField(fieldKey, valueKey).Warning("Re-created missing slave key") + } + + return nil +} + +// UpdateKeyIfLocked refreshes the record that this node is using this key -> id +// mapping. When reliablyMissing is set it will also recreate missing master or +// slave keys. +func (k *kvstoreBackend) UpdateKeyIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool, lock kvstore.KVLocker) error { + var ( + err error + recreated bool + keyPath = path.Join(k.idPrefix, id.String()) + keyEncoded = []byte(k.backend.Encode([]byte(key.GetKey()))) + valueKey = path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())), k.suffix) + ) + + // Use of CreateOnly() ensures that any existing potentially + // conflicting key is never overwritten. 
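+	// (CreateOnlyIfLocked is expected to return success == false without
+	// an error when the key already exists, so an existing mapping is
+	// left untouched.)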
+	success, err := k.backend.CreateOnlyIfLocked(ctx, keyPath, keyEncoded, false, lock)
+	switch {
+	case err != nil:
+		return fmt.Errorf("Unable to re-create missing master key \"%s\" -> \"%s\": %s", fieldKey, valueKey, err)
+	case success:
+		log.WithField(fieldKey, keyPath).Warning("Re-created missing master key")
+	}
+
+	// Also re-create the slave key in case it has been deleted. This will
+	// ensure that the next garbage collection cycle of any participating
+	// node does not remove the master key again.
+	// lock is ignored since the key doesn't exist.
+	if reliablyMissing {
+		recreated, err = k.backend.CreateOnly(ctx, valueKey, []byte(id.String()), true)
+	} else {
+		recreated, err = k.backend.UpdateIfDifferentIfLocked(ctx, valueKey, []byte(id.String()), true, lock)
+	}
+	switch {
+	case err != nil:
+		return fmt.Errorf("Unable to re-create missing slave key \"%s\" -> \"%s\": %s", fieldKey, valueKey, err)
+	case recreated:
+		log.WithField(fieldKey, valueKey).Warning("Re-created missing slave key")
+	}
+
+	return nil
+}
+
+// Release releases the use of an ID associated with the provided key. It does
+// not guard against concurrent releases. This is currently guarded by
+// Allocator.slaveKeysMutex when called from pkg/allocator.Allocator.Release.
+func (k *kvstoreBackend) Release(ctx context.Context, _ idpool.ID, key allocator.AllocatorKey) (err error) {
+	valueKey := path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())), k.suffix)
+	log.WithField(fieldKey, key).Info("Released last local use of key, invoking global release")
+
+	// does not need to be deleted with a lock as it's protected by the
+	// Allocator.slaveKeysMutex
+	if err := k.backend.Delete(ctx, valueKey); err != nil {
+		log.WithError(err).WithFields(logrus.Fields{fieldKey: key}).Warning("Ignoring node specific ID")
+		return err
+	}
+
+	// if k.lockless {
+	// FIXME: etcd 3.3 will make it possible to do a lockless
+	// cleanup of the ID and release it right away. For now we rely
+	// on the GC to kick in and release unused IDs.
+	// }
+
+	return nil
+}
+
+// RunLocksGC scans the kvstore for unused locks and removes them. Returns
+// a map of locks that are currently being held, including the ones that have
+// failed to be GCed.
+func (k *kvstoreBackend) RunLocksGC(ctx context.Context, staleKeysPrevRound map[string]kvstore.Value) (map[string]kvstore.Value, error) {
+	// fetch list of all /../locks keys
+	allocated, err := k.backend.ListPrefix(ctx, k.lockPrefix)
+	if err != nil {
+		return nil, fmt.Errorf("list failed: %s", err)
+	}
+
+	staleKeys := map[string]kvstore.Value{}
+
+	// iterate over /../locks
+	for key, v := range allocated {
+		scopedLog := log.WithFields(logrus.Fields{
+			fieldKey:     key,
+			fieldLeaseID: strconv.FormatUint(uint64(v.LeaseID), 16),
+		})
+		// Only delete if this key was previously marked as to be deleted
+		if modRev, ok := staleKeysPrevRound[key]; ok &&
+			// comparing ModRevision ensures the same client is still holding
+			// this lock since the last GC was called.
+			modRev.ModRevision == v.ModRevision &&
+			modRev.LeaseID == v.LeaseID &&
+			modRev.SessionID == v.SessionID {
+			if err := k.backend.Delete(ctx, key); err == nil {
+				scopedLog.Warning("Forcefully removed distributed lock due to client staleness." +
+					" Please check the connectivity between the KVStore and the client with that lease ID.")
+				continue
+			}
+			scopedLog.WithError(err).
+				Warning("Unable to remove distributed lock due to client staleness." +
+ + " Please check the connectivity between the KVStore and the client with that lease ID.") + } + // If the key was not found mark it to be delete in the next RunGC + staleKeys[key] = kvstore.Value{ + ModRevision: v.ModRevision, + LeaseID: v.LeaseID, + SessionID: v.SessionID, + } + } + + return staleKeys, nil +} + +// RunGC scans the kvstore for unused master keys and removes them +func (k *kvstoreBackend) RunGC( + ctx context.Context, + rateLimit *rate.Limiter, + staleKeysPrevRound map[string]uint64, + minID, maxID idpool.ID, +) (map[string]uint64, *allocator.GCStats, error) { + + // fetch list of all /id/ keys + allocated, err := k.backend.ListPrefix(ctx, k.idPrefix) + if err != nil { + return nil, nil, fmt.Errorf("list failed: %s", err) + } + + totalEntries := len(allocated) + deletedEntries := 0 + + staleKeys := map[string]uint64{} + + min := uint64(minID) + max := uint64(maxID) + reasonOutOfRange := "out of local cluster identity range [" + strconv.FormatUint(min, 10) + "," + strconv.FormatUint(max, 10) + "]" + + // iterate over /id/ + for key, v := range allocated { + // if k.lockless { + // FIXME: Add DeleteOnZeroCount support + // } + + // Parse identity ID + items := strings.Split(key, "/") + if len(items) == 0 { + log.WithField(fieldKey, key).WithError(err).Warning("Unknown identity key found, skipping") + continue + } + + if identityID, err := strconv.ParseUint(items[len(items)-1], 10, 64); err != nil { + log.WithField(fieldKey, key).WithError(err).Warning("Parse identity failed, skipping") + continue + } else { + // We should not GC those identities that are out of our scope + if identityID < min || identityID > max { + log.WithFields(logrus.Fields{ + fieldKey: key, + "reason": reasonOutOfRange, + }).Debug("Skipping this key") + continue + } + } + + lock, err := k.lockPath(ctx, key) + if err != nil { + log.WithError(err).WithField(fieldKey, key).Warning("allocator garbage collector was unable to lock key") + continue + } + + // fetch list of all /value/ keys + valueKeyPrefix := path.Join(k.valuePrefix, string(v.Data)) + pairs, err := k.backend.ListPrefixIfLocked(ctx, valueKeyPrefix, lock) + if err != nil { + log.WithError(err).WithField(fieldPrefix, valueKeyPrefix).Warning("allocator garbage collector was unable to list keys") + lock.Unlock(context.Background()) + continue + } + + hasUsers := false + for prefix := range pairs { + if prefixMatchesKey(valueKeyPrefix, prefix) { + hasUsers = true + break + } + } + + var deleted bool + // if ID has no user, delete it + if !hasUsers { + scopedLog := log.WithFields(logrus.Fields{ + fieldKey: key, + fieldID: path.Base(key), + }) + // Only delete if this key was previously marked as to be deleted + if modRev, ok := staleKeysPrevRound[key]; ok { + // if the v.ModRevision is different than the modRev (which is + // the last seen v.ModRevision) then this key was re-used in + // between GC calls. We should not mark it as stale keys yet, + // but the next GC call will do it. + if modRev == v.ModRevision { + if err := k.backend.DeleteIfLocked(ctx, key, lock); err != nil { + scopedLog.WithError(err).Warning("Unable to delete unused allocator master key") + } else { + deletedEntries++ + scopedLog.Info("Deleted unused allocator master key") + } + // consider the key regardless if there was an error from + // the kvstore. We want to rate limit the number of requests + // done to the KVStore. 
+					deleted = true
+				}
+			} else {
+				// If the key was not found, mark it to be deleted in the next RunGC
+				staleKeys[key] = v.ModRevision
+			}
+		}
+
+		lock.Unlock(context.Background())
+		if deleted {
+			// Wait after deleting the key. This is not ideal because we have
+			// done the operation that should be rate limited before checking the
+			// rate limit. We have to do this here to avoid holding the global lock
+			// for a long period of time.
+			err = rateLimit.Wait(ctx)
+			if err != nil {
+				return nil, nil, err
+			}
+		}
+	}
+
+	gcStats := &allocator.GCStats{
+		Alive:   totalEntries - deletedEntries,
+		Deleted: deletedEntries,
+	}
+	return staleKeys, gcStats, nil
+}
+
+func (k *kvstoreBackend) keyToID(key string) (id idpool.ID, err error) {
+	if !strings.HasPrefix(key, k.idPrefix) {
+		return idpool.NoID, fmt.Errorf("Found invalid key \"%s\" outside of prefix \"%s\"", key, k.idPrefix)
+	}
+
+	suffix := strings.TrimPrefix(key, k.idPrefix)
+	if suffix[0] == '/' {
+		suffix = suffix[1:]
+	}
+
+	idParsed, err := strconv.ParseUint(suffix, 10, 64)
+	if err != nil {
+		return idpool.NoID, fmt.Errorf("Cannot parse key suffix \"%s\"", suffix)
+	}
+
+	return idpool.ID(idParsed), nil
+}
+
+func (k *kvstoreBackend) ListAndWatch(ctx context.Context, handler allocator.CacheMutations, stopChan chan struct{}) {
+	watcher := k.backend.ListAndWatch(ctx, k.idPrefix, 512)
+
+	for {
+		select {
+		case event, ok := <-watcher.Events:
+			if !ok {
+				goto abort
+			}
+			if event.Typ == kvstore.EventTypeListDone {
+				handler.OnListDone()
+				continue
+			}
+
+			id, err := k.keyToID(event.Key)
+			switch {
+			case err != nil:
+				log.WithError(err).WithField(fieldKey, event.Key).Warning("Invalid key")
+
+			case id != idpool.NoID:
+				var key allocator.AllocatorKey
+
+				if len(event.Value) > 0 {
+					s, err := k.backend.Decode(string(event.Value))
+					if err != nil {
+						log.WithError(err).WithFields(logrus.Fields{
+							fieldKey:   event.Key,
+							fieldValue: event.Value,
+						}).Warning("Unable to decode key value")
+						continue
+					}
+
+					key = k.keyType.PutKey(string(s))
+				} else {
+					if event.Typ != kvstore.EventTypeDelete {
+						log.WithFields(logrus.Fields{
+							fieldKey:       event.Key,
+							fieldEventType: event.Typ,
+						}).Error("Received a key with an empty value")
+						continue
+					}
+				}
+
+				switch event.Typ {
+				case kvstore.EventTypeCreate:
+					handler.OnAdd(id, key)
+
+				case kvstore.EventTypeModify:
+					handler.OnModify(id, key)
+
+				case kvstore.EventTypeDelete:
+					handler.OnDelete(id, key)
+				}
+			}
+
+		case <-stopChan:
+			goto abort
+		}
+	}
+
+abort:
+	watcher.Stop()
+}
+
+func (k *kvstoreBackend) Status() (string, error) {
+	return k.backend.Status()
+}
+
+func (k *kvstoreBackend) Encode(v string) string {
+	return k.backend.Encode([]byte(v))
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/doc.go b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/doc.go
new file mode 100644
index 0000000000..26c9a9aad1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/doc.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package allocator provides a kvstore based ID allocator
+package allocator
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/logfields.go b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/logfields.go
new file mode 100644
index 0000000000..e6f0330297
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/logfields.go
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+ +package allocator + +const ( + fieldID = "id" + fieldKey = "key" + fieldPrefix = "prefix" + fieldValue = "value" + fieldLeaseID = "leaseID" + fieldEventType = "eventType" +) diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/backend.go b/vendor/github.com/cilium/cilium/pkg/kvstore/backend.go new file mode 100644 index 0000000000..b9a5ad5da8 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/kvstore/backend.go @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package kvstore + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/cilium/cilium/pkg/time" +) + +type backendOption struct { + // description is the description of the option + description string + + // value is the value the option has been configured to + value string + + // validate, if set, is called to validate the value before assignment + validate func(value string) error +} + +type backendOptions map[string]*backendOption + +type ClusterSizeDependantIntervalFunc func(baseInterval time.Duration) time.Duration + +// ExtraOptions represents any options that can not be represented in a textual +// format and need to be set programmatically. +type ExtraOptions struct { + DialOption []grpc.DialOption + + // ClusterSizeDependantInterval defines the function to calculate + // intervals based on cluster size + ClusterSizeDependantInterval ClusterSizeDependantIntervalFunc + + // NoLockQuorumCheck disables the lock acquisition quorum check + NoLockQuorumCheck bool + + // ClusterName is the name of each etcd cluster + ClusterName string +} + +// StatusCheckInterval returns the interval of status checks depending on the +// cluster size and the current connectivity state +// +// nodes OK Failing +// 1 20s 3s +// 4 45s 7s +// 8 1m05s 11s +// 32 1m45s 18s +// 128 2m25s 24s +// 512 3m07s 32s +// 2048 3m46s 38s +// 8192 4m30s 45s +func (e *ExtraOptions) StatusCheckInterval(allConnected bool) time.Duration { + interval := 30 * time.Second + + // Reduce the interval while connectivity issues are being detected + if !allConnected { + interval = 5 * time.Second + } + + if e != nil && e.ClusterSizeDependantInterval != nil { + interval = e.ClusterSizeDependantInterval(interval) + } + return interval +} + +// backendModule is the interface that each kvstore backend has to implement. +type backendModule interface { + // getName must return the name of the backend + getName() string + + // setConfig must configure the backend with the specified options. + // This function is called once before newClient(). + setConfig(opts map[string]string) error + + // setExtraConfig sets more options in the kvstore that are not able to + // be set by strings. + setExtraConfig(opts *ExtraOptions) error + + // setConfigDummy must configure the backend with dummy configuration + // for testing purposes. This is a replacement for setConfig(). + setConfigDummy() + + // getConfig must return the backend configuration. 
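+	// (For the consul backend defined later in this patch this would
+	// contain, for example, the "consul.address" and "consul.tlsconfig"
+	// options.)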
+	getConfig() map[string]string
+
+	// newClient must initialize the backend and create a new kvstore
+	// client which implements the BackendOperations interface
+	newClient(ctx context.Context, opts *ExtraOptions) (BackendOperations, chan error)
+
+	// createInstance creates a new instance of the module
+	createInstance() backendModule
+}
+
+var (
+	// registeredBackends is a map of all backends that have registered
+	// themselves via registerBackend()
+	registeredBackends = map[string]backendModule{}
+)
+
+// registerBackend must be called by kvstore backends to register themselves
+func registerBackend(name string, module backendModule) {
+	if _, ok := registeredBackends[name]; ok {
+		log.Panicf("backend with name '%s' already registered", name)
+	}
+
+	registeredBackends[name] = module
+}
+
+// getBackend finds a registered backend by name
+func getBackend(name string) backendModule {
+	if backend, ok := registeredBackends[name]; ok {
+		return backend.createInstance()
+	}
+
+	return nil
+}
+
+// BackendOperations are the individual kvstore operations that each backend
+// must implement. Direct use of this interface is possible but will bypass the
+// tracing layer.
+type BackendOperations interface {
+	// Connected returns a channel which is closed whenever the kvstore client
+	// is connected to the kvstore server.
+	Connected(ctx context.Context) <-chan error
+
+	// Disconnected returns a channel which is closed whenever the kvstore
+	// client is not connected to the kvstore server. (Only implemented for etcd)
+	Disconnected() <-chan struct{}
+
+	// Status returns the status of the kvstore client including an
+	// eventual error
+	Status() (string, error)
+
+	// StatusCheckErrors returns a channel which receives status check
+	// errors
+	StatusCheckErrors() <-chan error
+
+	// LockPath locks the provided path
+	LockPath(ctx context.Context, path string) (KVLocker, error)
+
+	// Get returns value of key
+	Get(ctx context.Context, key string) ([]byte, error)
+
+	// GetIfLocked returns value of key if the client is still holding the given lock.
+	GetIfLocked(ctx context.Context, key string, lock KVLocker) ([]byte, error)
+
+	// GetPrefix returns the first key which matches the prefix and its value
+	GetPrefix(ctx context.Context, prefix string) (string, []byte, error)
+
+	// GetPrefixIfLocked returns the first key which matches the prefix and its value if the client is still holding the given lock.
+	GetPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (string, []byte, error)
+
+	// Set sets value of key
+	Set(ctx context.Context, key string, value []byte) error
+
+	// Delete deletes a key. It does not return an error if the key does not exist.
+	Delete(ctx context.Context, key string) error
+
+	// DeleteIfLocked deletes a key if the client is still holding the given lock. It does not return an error if the key does not exist.
+	DeleteIfLocked(ctx context.Context, key string, lock KVLocker) error
+
+	DeletePrefix(ctx context.Context, path string) error
+
+	// Update creates or updates a key.
+	Update(ctx context.Context, key string, value []byte, lease bool) error
+
+	// UpdateIfLocked updates a key if the client is still holding the given lock.
+	UpdateIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) error
+
+	// UpdateIfDifferent updates a key if the value is different
+	UpdateIfDifferent(ctx context.Context, key string, value []byte, lease bool) (bool, error)
+
+	// UpdateIfDifferentIfLocked updates a key if the value is different and if the client is still holding the given lock.
+	UpdateIfDifferentIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (bool, error)
+
+	// CreateOnly atomically creates a key or fails if it already exists
+	CreateOnly(ctx context.Context, key string, value []byte, lease bool) (bool, error)
+
+	// CreateOnlyIfLocked atomically creates a key if the client is still holding the given lock or fails if it already exists
+	CreateOnlyIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (bool, error)
+
+	// CreateIfExists creates a key with the value only if key condKey exists
+	CreateIfExists(ctx context.Context, condKey, key string, value []byte, lease bool) error
+
+	// ListPrefix returns a list of keys matching the prefix
+	ListPrefix(ctx context.Context, prefix string) (KeyValuePairs, error)
+
+	// ListPrefixIfLocked returns a list of keys matching the prefix only if the client is still holding the given lock.
+	ListPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (KeyValuePairs, error)
+
+	// Watch starts watching for changes in a prefix. If list is true, the
+	// current keys matching the prefix will be listed and reported as new
+	// keys first.
+	Watch(ctx context.Context, w *Watcher)
+
+	// Close closes the kvstore client
+	Close(ctx context.Context)
+
+	// GetCapabilities returns the capabilities of the backend
+	GetCapabilities() Capabilities
+
+	// Encode encodes a binary slice into a character set that the backend
+	// supports
+	Encode(in []byte) string
+
+	// Decode decodes a key previously encoded back into the original binary slice
+	Decode(in string) ([]byte, error)
+
+	// ListAndWatch creates a new watcher which will watch the specified
+	// prefix for changes. Before doing this, it will list the current keys
+	// matching the prefix and report them as new keys. The Events channel is
+	// created with the specified sizes. Upon every change observed, a
+	// KeyValueEvent will be sent to the Events channel
+	ListAndWatch(ctx context.Context, prefix string, chanSize int) *Watcher
+
+	// RegisterLeaseExpiredObserver registers a function which is executed when
+	// the lease associated with a key having the given prefix is detected as expired.
+	// If the function is nil, the previous observer (if any) is unregistered.
+	RegisterLeaseExpiredObserver(prefix string, fn func(key string))
+
+	BackendOperationsUserMgmt
+}
+
+// BackendOperationsUserMgmt are the kvstore operations for user management.
+type BackendOperationsUserMgmt interface {
+	// UserEnforcePresence creates a user in the kvstore if not already present, and grants the specified roles.
+	UserEnforcePresence(ctx context.Context, name string, roles []string) error
+
+	// UserEnforceAbsence deletes a user from the kvstore, if present.
+	UserEnforceAbsence(ctx context.Context, name string) error
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/backwards_compat.go b/vendor/github.com/cilium/cilium/pkg/kvstore/backwards_compat.go
new file mode 100644
index 0000000000..a5417c5b17
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/backwards_compat.go
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import "context"
+
+const (
+	// OperationalPath is the base path to store the operational details in the kvstore.
+	OperationalPath = "cilium-net/operational"
+
+	// servicePathV1 is the base path for the services stored in the kvstore.
+	servicePathV1 = OperationalPath + "/Services/"
+)
+
+// deleteLegacyPrefixes removes old kvstore prefixes of non-persistent keys
+// which have been used in the past but have been obsoleted since. We remove
+// them on agent start to ensure that as users upgrade, we do not leave behind
+// stale keys.
+//
+// Rules:
+//
+// - For non-persistent state, obsoleted prefixes can be deleted as soon as the
+// prefix has been declared obsolete
+// - For persistent configuration stored in the kvstore, a forward upgrade
+// path must be created which automatically removes the old keys on successful
+// translation.
+func deleteLegacyPrefixes(ctx context.Context) {
+	// Delete all keys in old services prefix
+	Client().DeletePrefix(ctx, servicePathV1)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/cell.go b/vendor/github.com/cilium/cilium/pkg/kvstore/cell.go
new file mode 100644
index 0000000000..3a7f0e6f28
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/cell.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/spf13/pflag"
+
+	"github.com/cilium/cilium/pkg/defaults"
+	"github.com/cilium/cilium/pkg/hive"
+	"github.com/cilium/cilium/pkg/hive/cell"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	"github.com/cilium/cilium/pkg/option"
+	"github.com/cilium/cilium/pkg/promise"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+// Cell returns a cell which provides a promise for the global kvstore client.
+// The parameter allows customizing the default backend, which can be either
+// set to a specific value (e.g., in the case of clustermesh-apiserver) or
+// left unset.
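+//
+// Illustrative wiring (the surrounding modules are made up for this example):
+//
+//	// agent: backend selected at runtime via the --kvstore flag
+//	kvstore.Cell("")
+//	// clustermesh-apiserver: default to etcd
+//	kvstore.Cell("etcd")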
+var Cell = func(defaultBackend string) cell.Cell { + return cell.Module( + "kvstore-client", + "KVStore Client", + + cell.Config(config{ + KVStore: defaultBackend, + KVStoreConnectivityTimeout: defaults.KVstoreConnectivityTimeout, + KVStoreLeaseTTL: defaults.KVstoreLeaseTTL, + KVStorePeriodicSync: defaults.KVstorePeriodicSync, + }), + + cell.Provide(func(lc cell.Lifecycle, shutdowner hive.Shutdowner, cfg config, opts *ExtraOptions) promise.Promise[BackendOperations] { + resolver, promise := promise.New[BackendOperations]() + if cfg.KVStore == "" { + log.Info("Skipping connection to kvstore, as not configured") + resolver.Reject(errors.New("kvstore not configured")) + return promise + } + + // Propagate the options to the global variables for backward compatibility + option.Config.KVStore = cfg.KVStore + option.Config.KVStoreOpt = cfg.KVStoreOpt + option.Config.KVstoreConnectivityTimeout = cfg.KVStoreConnectivityTimeout + option.Config.KVstoreLeaseTTL = cfg.KVStoreLeaseTTL + option.Config.KVstorePeriodicSync = cfg.KVStorePeriodicSync + + ctx, cancel := context.WithCancel(context.Background()) + var wg sync.WaitGroup + + lc.Append(cell.Hook{ + OnStart: func(cell.HookContext) error { + wg.Add(1) + go func() { + defer wg.Done() + + log := log.WithField(logfields.BackendName, cfg.KVStore) + log.Info("Establishing connection to kvstore") + backend, errCh := NewClient(ctx, cfg.KVStore, cfg.KVStoreOpt, opts) + + if err, isErr := <-errCh; isErr { + log.WithError(err).Error("Failed to establish connection to kvstore") + resolver.Reject(fmt.Errorf("failed connecting to kvstore: %w", err)) + shutdowner.Shutdown(hive.ShutdownWithError(err)) + return + } + + log.Info("Connection to kvstore successfully established") + resolver.Resolve(backend) + }() + return nil + }, + OnStop: func(cell.HookContext) error { + cancel() + wg.Wait() + + // We don't explicitly close the backend here, because that would + // attempt to revoke the lease, causing all entries associated + // with that lease to be deleted. This would not be the + // behavior expected by the consumers of this cell. + return nil + }, + }) + + return promise + }), + ) +} + +type config struct { + KVStore string + KVStoreOpt map[string]string + KVStoreConnectivityTimeout time.Duration + KVStoreLeaseTTL time.Duration + KVStorePeriodicSync time.Duration +} + +func (def config) Flags(flags *pflag.FlagSet) { + flags.String(option.KVStore, def.KVStore, "Key-value store type") + + flags.StringToString(option.KVStoreOpt, def.KVStoreOpt, + "Key-value store options e.g. etcd.address=127.0.0.1:4001") + + flags.Duration(option.KVstoreConnectivityTimeout, def.KVStoreConnectivityTimeout, + "Time after which an incomplete kvstore operation is considered failed") + + flags.Duration(option.KVstoreLeaseTTL, def.KVStoreLeaseTTL, + "Time-to-live for the KVstore lease.") + flags.MarkHidden(option.KVstoreLeaseTTL) + + flags.Duration(option.KVstorePeriodicSync, def.KVStorePeriodicSync, + "Periodic KVstore synchronization interval") +} + +// GlobalUserMgmtClientPromiseCell provides a promise returning the global kvstore client to perform users +// management operations, once it has been initialized. 
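+//
+// A consumer would await the promise before use (sketch; names and roles are
+// illustrative):
+//
+//	backend, err := userMgmtPromise.Await(ctx)
+//	if err == nil {
+//		err = backend.UserEnforcePresence(ctx, "clustermesh", []string{"admin"})
+//	}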
+var GlobalUserMgmtClientPromiseCell = cell.Module( + "global-kvstore-users-client", + "Global KVStore Users Management Client Promise", + + cell.Provide(func(lc cell.Lifecycle, backendPromise promise.Promise[BackendOperations]) promise.Promise[BackendOperationsUserMgmt] { + resolver, promise := promise.New[BackendOperationsUserMgmt]() + ctx, cancel := context.WithCancel(context.Background()) + var wg sync.WaitGroup + + lc.Append(cell.Hook{ + OnStart: func(cell.HookContext) error { + wg.Add(1) + go func() { + backend, err := backendPromise.Await(ctx) + if err != nil { + resolver.Reject(err) + } else { + resolver.Resolve(backend) + } + wg.Done() + }() + return nil + }, + OnStop: func(cell.HookContext) error { + cancel() + wg.Wait() + return nil + }, + }) + + return promise + }), +) diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/client.go b/vendor/github.com/cilium/cilium/pkg/kvstore/client.go new file mode 100644 index 0000000000..60b68f1091 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/kvstore/client.go @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package kvstore + +import ( + "context" + "fmt" + + "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/time" +) + +var ( + // defaultClient is the default client initialized by initClient + defaultClient BackendOperations + // defaultClientSet is a channel that is closed whenever the defaultClient + // is set. + defaultClientSet = make(chan struct{}) +) + +func initClient(ctx context.Context, module backendModule, opts *ExtraOptions) error { + scopedLog := log.WithField(fieldKVStoreModule, module.getName()) + c, errChan := module.newClient(ctx, opts) + if c == nil { + err := <-errChan + scopedLog.WithError(err).Fatal("Unable to create kvstore client") + } + + defaultClient = c + select { + case <-defaultClientSet: + // avoid closing channel already closed. + default: + close(defaultClientSet) + } + + go func() { + err, isErr := <-errChan + if isErr && err != nil { + scopedLog.WithError(err).Fatal("Unable to connect to kvstore") + } + if !option.Config.JoinCluster { + deleteLegacyPrefixes(ctx) + } + }() + + return nil +} + +// Client returns the global kvstore, blocking until it has been configured +func Client() BackendOperations { + <-defaultClientSet + return defaultClient +} + +// NewClient returns a new kvstore client based on the configuration +func NewClient(ctx context.Context, selectedBackend string, opts map[string]string, options *ExtraOptions) (BackendOperations, chan error) { + // Channel used to report immediate errors, module.newClient will + // create and return a different channel, caller doesn't need to know + errChan := make(chan error, 1) + defer close(errChan) + + module := getBackend(selectedBackend) + if module == nil { + errChan <- fmt.Errorf("unknown key-value store type %q. 
See cilium.link/err-kvstore for details", selectedBackend) + return nil, errChan + } + + if err := module.setConfig(opts); err != nil { + errChan <- err + return nil, errChan + } + + if err := module.setExtraConfig(options); err != nil { + errChan <- err + return nil, errChan + } + + return module.newClient(ctx, options) +} + +// Connected returns a channel which is closed when the following conditions +// are being met at the same time: +// * The kvstore client is configured +// * Connectivity to the kvstore has been established +// * The kvstore has quorum +// +// The channel will *not* be closed if the kvstore client is closed before +// connectivity or quorum has been achieved. It will wait until a new kvstore +// client is configured to again wait for connectivity and quorum. +func Connected() <-chan struct{} { + c := make(chan struct{}) + go func(c chan struct{}) { + for { + if err := <-Client().Connected(context.Background()); err == nil { + close(c) + return + } + time.Sleep(100 * time.Millisecond) + } + }(c) + return c +} diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/config.go b/vendor/github.com/cilium/cilium/pkg/kvstore/config.go new file mode 100644 index 0000000000..7e85d4f066 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/kvstore/config.go @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package kvstore + +import ( + "context" + "fmt" + "sync" + + "github.com/cilium/cilium/pkg/logging/logfields" +) + +var ( + // selectedModule is the name of the selected backend module + selectedModule string +) + +// setOpts validates the specified options against the selected backend and +// then modifies the configuration +func setOpts(opts map[string]string, supportedOpts backendOptions) error { + errors := 0 + + for key, val := range opts { + opt, ok := supportedOpts[key] + if !ok { + errors++ + log.WithField(logfields.Key, key).Error("unknown kvstore configuration key") + continue + } + + if opt.validate != nil { + if err := opt.validate(val); err != nil { + log.WithError(err).Errorf("invalid value for key %s", key) + errors++ + } + } + + } + + // if errors have occurred, print the supported configuration keys to + // the log + if errors > 0 { + log.Error("Supported configuration keys:") + for key, val := range supportedOpts { + log.Errorf(" %-12s %s", key, val.description) + } + + return fmt.Errorf("invalid kvstore configuration, see log for details") + } + + // modify the configuration atomically after verification + for key, val := range opts { + supportedOpts[key].value = val + } + + return nil +} + +func getOpts(opts backendOptions) map[string]string { + result := map[string]string{} + + for key, opt := range opts { + result[key] = opt.value + } + + return result +} + +var ( + setupOnce sync.Once +) + +func setup(ctx context.Context, selectedBackend string, opts map[string]string, goOpts *ExtraOptions) error { + module := getBackend(selectedBackend) + if module == nil { + return fmt.Errorf("unknown key-value store type %q. 
See cilium.link/err-kvstore for details", selectedBackend) + } + + if err := module.setConfig(opts); err != nil { + return err + } + + if err := module.setExtraConfig(goOpts); err != nil { + return err + } + + selectedModule = module.getName() + + return initClient(ctx, module, goOpts) +} + +// Setup sets up the key-value store specified in kvStore and configures it +// with the options provided in opts +func Setup(ctx context.Context, selectedBackend string, opts map[string]string, goOpts *ExtraOptions) error { + var err error + + setupOnce.Do(func() { + err = setup(ctx, selectedBackend, opts, goOpts) + }) + + return err +} diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/consul.go b/vendor/github.com/cilium/cilium/pkg/kvstore/consul.go new file mode 100644 index 0000000000..d4fb8bfbe3 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/kvstore/consul.go @@ -0,0 +1,790 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package kvstore + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "os" + + consulAPI "github.com/hashicorp/consul/api" + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + + "github.com/cilium/cilium/pkg/backoff" + "github.com/cilium/cilium/pkg/controller" + "github.com/cilium/cilium/pkg/inctimer" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/spanstat" + "github.com/cilium/cilium/pkg/time" +) + +const ( + consulName = "consul" + + // ConsulAddrOption is the string representing the key mapping to the value of the + // address for Consul. + ConsulAddrOption = "consul.address" + ConsulOptionConfig = "consul.tlsconfig" + + // maxLockRetries is the number of retries attempted when acquiring a lock + maxLockRetries = 10 +) + +type consulModule struct { + opts backendOptions + config *consulAPI.Config +} + +var ( + // consulDummyAddress can be overwritten from test invokers using ldflags + consulDummyAddress = "https://127.0.0.1:8501" + // consulDummyConfigFile can be overwritten from test invokers using ldflags + consulDummyConfigFile = "/tmp/cilium-consul-certs/cilium-consul.yaml" + + module = newConsulModule() + + // ErrNotImplemented is the error which is returned when a functionality is not implemented. 
+ ErrNotImplemented = errors.New("not implemented") + + consulLeaseKeepaliveControllerGroup = controller.NewGroup("consul-lease-keepalive") +) + +func init() { + // register consul module for use + registerBackend(consulName, module) +} + +func newConsulModule() backendModule { + return &consulModule{ + opts: backendOptions{ + ConsulAddrOption: &backendOption{ + description: "Addresses of consul cluster", + }, + ConsulOptionConfig: &backendOption{ + description: "Path to consul tls configuration file", + }, + }, + } +} + +func ConsulDummyAddress() string { + return consulDummyAddress +} + +func ConsulDummyConfigFile() string { + return consulDummyConfigFile +} + +func (c *consulModule) createInstance() backendModule { + return newConsulModule() +} + +func (c *consulModule) getName() string { + return consulName +} + +func (c *consulModule) setConfigDummy() { + c.config = consulAPI.DefaultConfig() + c.config.Address = consulDummyAddress + yc := consulAPI.TLSConfig{} + b, err := os.ReadFile(consulDummyConfigFile) + if err != nil { + log.WithError(err).Warnf("unable to read consul tls configuration file %s", consulDummyConfigFile) + } + + err = yaml.Unmarshal(b, &yc) + if err != nil { + log.WithError(err).Warnf("invalid consul tls configuration in %s", consulDummyConfigFile) + } + + c.config.TLSConfig = yc +} + +func (c *consulModule) setConfig(opts map[string]string) error { + return setOpts(opts, c.opts) +} + +func (c *consulModule) setExtraConfig(opts *ExtraOptions) error { + return nil +} + +func (c *consulModule) getConfig() map[string]string { + return getOpts(c.opts) +} + +func (c *consulModule) newClient(ctx context.Context, opts *ExtraOptions) (BackendOperations, chan error) { + log.WithFields(logrus.Fields{ + logfields.URL: "https://cilium.herokuapp.com/", + }).Warning("Support for Consul as a kvstore backend has been deprecated due to lack of maintainers. 
If you are interested in helping to maintain Consul support in Cilium, please reach out on GitHub or the official Cilium slack") + + errChan := make(chan error, 1) + backend, err := c.connectConsulClient(ctx, opts) + if err != nil { + errChan <- err + } + close(errChan) + return backend, errChan +} + +func (c *consulModule) connectConsulClient(ctx context.Context, opts *ExtraOptions) (BackendOperations, error) { + if c.config == nil { + consulAddr, consulAddrSet := c.opts[ConsulAddrOption] + configPathOpt, configPathOptSet := c.opts[ConsulOptionConfig] + if !consulAddrSet { + return nil, fmt.Errorf("invalid consul configuration, please specify %s option", ConsulAddrOption) + } + + if consulAddr.value == "" { + return nil, fmt.Errorf("invalid consul configuration, please specify %s option", ConsulAddrOption) + } + + addr := consulAddr.value + c.config = consulAPI.DefaultConfig() + if configPathOptSet && configPathOpt.value != "" { + b, err := os.ReadFile(configPathOpt.value) + if err != nil { + return nil, fmt.Errorf("unable to read consul tls configuration file %s: %s", configPathOpt.value, err) + } + yc := consulAPI.TLSConfig{} + err = yaml.Unmarshal(b, &yc) + if err != nil { + return nil, fmt.Errorf("invalid consul tls configuration in %s: %s", configPathOpt.value, err) + } + c.config.TLSConfig = yc + } + + c.config.Address = addr + + } + client, err := newConsulClient(ctx, c.config, opts) + if err != nil { + return nil, err + } + + return client, nil +} + +var ( + maxRetries = 30 +) + +type consulClient struct { + *consulAPI.Client + lease string + controllers *controller.Manager + extraOptions *ExtraOptions + disconnectedMu lock.RWMutex + disconnected chan struct{} + statusCheckErrors chan error +} + +func newConsulClient(ctx context.Context, config *consulAPI.Config, opts *ExtraOptions) (BackendOperations, error) { + var ( + c *consulAPI.Client + err error + ) + if config != nil { + c, err = consulAPI.NewClient(config) + } else { + c, err = consulAPI.NewClient(consulAPI.DefaultConfig()) + } + if err != nil { + return nil, err + } + + boff := backoff.Exponential{Min: time.Duration(100) * time.Millisecond} + + for i := 0; i < maxRetries; i++ { + var leader string + leader, err = c.Status().Leader() + + if err == nil { + if leader != "" { + // happy path + break + } else { + err = errors.New("timeout while waiting for leader to be elected") + } + } + log.Info("Waiting for consul to elect a leader") + boff.Wait(ctx) + } + + if err != nil { + log.WithError(err).Fatal("Unable to contact consul server") + } + + entry := &consulAPI.SessionEntry{ + TTL: fmt.Sprintf("%ds", int(option.Config.KVstoreLeaseTTL.Seconds())), + Behavior: consulAPI.SessionBehaviorDelete, + } + + wo := &consulAPI.WriteOptions{} + lease, _, err := c.Session().Create(entry, wo.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("unable to create default lease: %s", err) + } + + client := &consulClient{ + Client: c, + lease: lease, + controllers: controller.NewManager(), + extraOptions: opts, + disconnected: make(chan struct{}), + statusCheckErrors: make(chan error, 128), + } + + client.controllers.UpdateController( + fmt.Sprintf("consul-lease-keepalive-%p", c), + controller.ControllerParams{ + Group: consulLeaseKeepaliveControllerGroup, + DoFunc: func(ctx context.Context) error { + wo := &consulAPI.WriteOptions{} + _, _, err := c.Session().Renew(lease, wo.WithContext(ctx)) + if err != nil { + // consider disconnected! 
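+					// Closing the channel wakes any current Disconnected()
+					// waiters; a fresh channel is installed for later calls.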
+ client.disconnectedMu.Lock() + close(client.disconnected) + client.disconnected = make(chan struct{}) + client.disconnectedMu.Unlock() + } + return err + }, + RunInterval: option.Config.KVstoreKeepAliveInterval, + }, + ) + + return client, nil +} + +type ConsulLocker struct { + *consulAPI.Lock +} + +func (cl *ConsulLocker) Unlock(ctx context.Context) error { + return cl.Lock.Unlock() +} + +func (cl *ConsulLocker) Comparator() interface{} { + return nil +} + +func (c *consulClient) LockPath(ctx context.Context, path string) (KVLocker, error) { + lockKey, err := c.LockOpts(&consulAPI.LockOptions{Key: getLockPath(path)}) + if err != nil { + return nil, err + } + + for retries := 0; retries < maxLockRetries; retries++ { + ch, err := lockKey.Lock(nil) + switch { + case err != nil: + return nil, err + case ch == nil: + Trace("Acquiring lock timed out, retrying", nil, logrus.Fields{fieldKey: path, logfields.Attempt: retries}) + default: + return &ConsulLocker{Lock: lockKey}, err + } + + select { + case <-ctx.Done(): + return nil, fmt.Errorf("lock cancelled via context: %s", ctx.Err()) + default: + } + } + + return nil, fmt.Errorf("maximum retries (%d) reached", maxLockRetries) +} + +// Watch starts watching for changes in a prefix +func (c *consulClient) Watch(ctx context.Context, w *Watcher) { + // Last known state of all KVPairs matching the prefix + localState := map[string]consulAPI.KVPair{} + nextIndex := uint64(0) + + q := &consulAPI.QueryOptions{ + WaitTime: time.Second, + } + + qo := q.WithContext(ctx) + + sleepTimer, sleepTimerDone := inctimer.New() + defer sleepTimerDone() + + for { + // Initialize sleep time to a millisecond as we don't + // want to sleep in between successful watch cycles + sleepTime := 1 * time.Millisecond + + qo.WaitIndex = nextIndex + pairs, q, err := c.KV().List(w.Prefix, qo) + if err != nil { + sleepTime = 5 * time.Second + Trace("List of Watch failed", err, logrus.Fields{fieldPrefix: w.Prefix}) + } + + if q != nil { + nextIndex = q.LastIndex + } + + // timeout while watching for changes, re-schedule + if qo.WaitIndex != 0 && (q == nil || q.LastIndex == qo.WaitIndex) { + goto wait + } + + for _, newPair := range pairs { + oldPair, ok := localState[newPair.Key] + + // Keys reported for the first time must be new + if !ok { + if newPair.CreateIndex != newPair.ModifyIndex { + log.Debugf("consul: Previously unknown key %s received with CreateIndex(%d) != ModifyIndex(%d)", + newPair.Key, newPair.CreateIndex, newPair.ModifyIndex) + } + + queueStart := spanstat.Start() + w.Events <- KeyValueEvent{ + Typ: EventTypeCreate, + Key: newPair.Key, + Value: newPair.Value, + } + trackEventQueued(newPair.Key, EventTypeCreate, queueStart.End(true).Total()) + } else if oldPair.ModifyIndex != newPair.ModifyIndex { + queueStart := spanstat.Start() + w.Events <- KeyValueEvent{ + Typ: EventTypeModify, + Key: newPair.Key, + Value: newPair.Value, + } + trackEventQueued(newPair.Key, EventTypeModify, queueStart.End(true).Total()) + } + + // Everything left on localState will be assumed to + // have been deleted, therefore remove all keys in + // localState that still exist in the kvstore + delete(localState, newPair.Key) + } + + for k, deletedPair := range localState { + queueStart := spanstat.Start() + w.Events <- KeyValueEvent{ + Typ: EventTypeDelete, + Key: deletedPair.Key, + Value: deletedPair.Value, + } + trackEventQueued(deletedPair.Key, EventTypeDelete, queueStart.End(true).Total()) + delete(localState, k) + } + + for _, newPair := range pairs { + localState[newPair.Key] = 
*newPair + + } + + // Initial list operation has been completed, signal this + if qo.WaitIndex == 0 { + w.Events <- KeyValueEvent{Typ: EventTypeListDone} + } + + wait: + select { + case <-sleepTimer.After(sleepTime): + case <-w.stopWatch: + close(w.Events) + w.stopWait.Done() + return + } + } +} + +func (c *consulClient) waitForInitLock(ctx context.Context) <-chan struct{} { + initLockSucceeded := make(chan struct{}) + + go func() { + for { + locker, err := c.LockPath(ctx, InitLockPath) + if err == nil { + locker.Unlock(context.Background()) + close(initLockSucceeded) + log.Info("Distributed lock successful, consul has quorum") + return + } + + time.Sleep(100 * time.Millisecond) + } + }() + + return initLockSucceeded +} + +// Connected closes the returned channel when the consul client is connected. +func (c *consulClient) Connected(ctx context.Context) <-chan error { + ch := make(chan error) + go func() { + for { + qo := &consulAPI.QueryOptions{} + // TODO find out if there's a better way to do this for consul + _, _, err := c.Session().Info(c.lease, qo.WithContext(ctx)) + if err == nil { + break + } + time.Sleep(100 * time.Millisecond) + } + <-c.waitForInitLock(ctx) + close(ch) + }() + return ch +} + +// Disconnected closes the returned channel when consul detects the client +// is disconnected from the server. +func (c *consulClient) Disconnected() <-chan struct{} { + c.disconnectedMu.RLock() + ch := c.disconnected + c.disconnectedMu.RUnlock() + return ch +} + +func (c *consulClient) Status() (string, error) { + leader, err := c.Client.Status().Leader() + return "Consul: " + leader, err +} + +func (c *consulClient) DeletePrefix(ctx context.Context, path string) (err error) { + defer func() { Trace("DeletePrefix", err, logrus.Fields{fieldPrefix: path}) }() + + duration := spanstat.Start() + wo := &consulAPI.WriteOptions{} + _, err = c.Client.KV().DeleteTree(path, wo.WithContext(ctx)) + increaseMetric(path, metricDelete, "DeletePrefix", duration.EndError(err).Total(), err) + return err +} + +// Set sets value of key +func (c *consulClient) Set(ctx context.Context, key string, value []byte) (err error) { + defer func() { Trace("Set", err, logrus.Fields{fieldKey: key, fieldValue: string(value)}) }() + + duration := spanstat.Start() + wo := &consulAPI.WriteOptions{} + _, err = c.KV().Put(&consulAPI.KVPair{Key: key, Value: value}, wo.WithContext(ctx)) + increaseMetric(key, metricSet, "Set", duration.EndError(err).Total(), err) + return err +} + +// DeleteIfLocked deletes a key if the client is still holding the given lock. +func (c *consulClient) DeleteIfLocked(ctx context.Context, key string, lock KVLocker) (err error) { + defer func() { Trace("DeleteIfLocked", err, logrus.Fields{fieldKey: key}) }() + return c.delete(ctx, key) +} + +// Delete deletes a key +func (c *consulClient) Delete(ctx context.Context, key string) (err error) { + defer func() { Trace("Delete", err, logrus.Fields{fieldKey: key}) }() + return c.delete(ctx, key) +} + +func (c *consulClient) delete(ctx context.Context, key string) error { + duration := spanstat.Start() + wo := &consulAPI.WriteOptions{} + _, err := c.KV().Delete(key, wo.WithContext(ctx)) + increaseMetric(key, metricDelete, "Delete", duration.EndError(err).Total(), err) + return err +} + +// GetIfLocked returns value of key if the client is still holding the given lock. 
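+// As with the other *IfLocked methods of the consul backend, the lock is not
+// re-validated here; the call simply delegates to Get.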
+func (c *consulClient) GetIfLocked(ctx context.Context, key string, lock KVLocker) (bv []byte, err error) { + defer func() { Trace("GetIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: string(bv)}) }() + return c.Get(ctx, key) +} + +// Get returns value of key +func (c *consulClient) Get(ctx context.Context, key string) (bv []byte, err error) { + defer func() { Trace("Get", err, logrus.Fields{fieldKey: key, fieldValue: string(bv)}) }() + + duration := spanstat.Start() + qo := &consulAPI.QueryOptions{} + pair, _, err := c.KV().Get(key, qo.WithContext(ctx)) + increaseMetric(key, metricRead, "Get", duration.EndError(err).Total(), err) + if err != nil { + return nil, err + } + if pair == nil { + return nil, nil + } + return pair.Value, nil +} + +// GetPrefixIfLocked returns the first key which matches the prefix and its value if the client is still holding the given lock. +func (c *consulClient) GetPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (k string, bv []byte, err error) { + defer func() { + Trace("GetPrefixIfLocked", err, logrus.Fields{fieldPrefix: prefix, fieldKey: k, fieldValue: string(bv)}) + }() + return c.getPrefix(ctx, prefix) +} + +// GetPrefix returns the first key which matches the prefix and its value +func (c *consulClient) GetPrefix(ctx context.Context, prefix string) (k string, bv []byte, err error) { + defer func() { + Trace("GetPrefix", err, logrus.Fields{fieldPrefix: prefix, fieldKey: k, fieldValue: string(bv)}) + }() + return c.getPrefix(ctx, prefix) +} + +func (c *consulClient) getPrefix(ctx context.Context, prefix string) (k string, bv []byte, err error) { + duration := spanstat.Start() + opts := &consulAPI.QueryOptions{} + pairs, _, err := c.KV().List(prefix, opts.WithContext(ctx)) + increaseMetric(prefix, metricRead, "GetPrefix", duration.EndError(err).Total(), err) + if err != nil { + return "", nil, err + } + + if len(pairs) == 0 { + return "", nil, nil + } + + return pairs[0].Key, pairs[0].Value, nil +} + +// UpdateIfLocked updates a key if the client is still holding the given lock. +func (c *consulClient) UpdateIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) error { + return c.Update(ctx, key, value, lease) +} + +// Update creates or updates a key with the value +func (c *consulClient) Update(ctx context.Context, key string, value []byte, lease bool) (err error) { + defer func() { + Trace("Update", err, logrus.Fields{fieldKey: key, fieldValue: string(value), fieldAttachLease: lease}) + }() + + k := &consulAPI.KVPair{Key: key, Value: value} + + if lease { + k.Session = c.lease + } + + opts := &consulAPI.WriteOptions{} + + duration := spanstat.Start() + _, err = c.KV().Put(k, opts.WithContext(ctx)) + increaseMetric(key, metricSet, "Update", duration.EndError(err).Total(), err) + return err +} + +// UpdateIfDifferentIfLocked updates a key if the value is different and if the client is still holding the given lock. 
+func (c *consulClient) UpdateIfDifferentIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (recreated bool, err error) {
+	defer func() {
+		Trace("UpdateIfDifferentIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "recreated": recreated})
+	}()
+
+	return c.updateIfDifferent(ctx, key, value, lease)
+}
+
+// UpdateIfDifferent updates a key if the value is different
+func (c *consulClient) UpdateIfDifferent(ctx context.Context, key string, value []byte, lease bool) (recreated bool, err error) {
+	defer func() {
+		Trace("UpdateIfDifferent", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "recreated": recreated})
+	}()
+
+	return c.updateIfDifferent(ctx, key, value, lease)
+}
+
+func (c *consulClient) updateIfDifferent(ctx context.Context, key string, value []byte, lease bool) (bool, error) {
+	duration := spanstat.Start()
+	qo := &consulAPI.QueryOptions{}
+	getR, _, err := c.KV().Get(key, qo.WithContext(ctx))
+	increaseMetric(key, metricRead, "Get", duration.EndError(err).Total(), err)
+	// On error, attempt update blindly
+	if err != nil || getR == nil {
+		return true, c.Update(ctx, key, value, lease)
+	}
+
+	// If the key is attached to a different session, update.
+	if lease && getR.Session != c.lease {
+		return true, c.Update(ctx, key, value, lease)
+	}
+
+	// If the value is not equal, update.
+	if !bytes.Equal(getR.Value, value) {
+		return true, c.Update(ctx, key, value, lease)
+	}
+
+	return false, nil
+}
+
+// CreateOnlyIfLocked atomically creates a key if the client is still holding the given lock or fails if it already exists
+func (c *consulClient) CreateOnlyIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (success bool, err error) {
+	defer func() {
+		Trace("CreateOnlyIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "success": success})
+	}()
+	return c.createOnly(ctx, key, value, lease)
+}
+
+// CreateOnly creates a key with the value and will fail if the key already exists
+func (c *consulClient) CreateOnly(ctx context.Context, key string, value []byte, lease bool) (success bool, err error) {
+	defer func() {
+		Trace("CreateOnly", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "success": success})
+	}()
+
+	return c.createOnly(ctx, key, value, lease)
+}
+
+func (c *consulClient) createOnly(ctx context.Context, key string, value []byte, lease bool) (bool, error) {
+	k := &consulAPI.KVPair{
+		Key:         key,
+		Value:       value,
+		CreateIndex: 0,
+	}
+
+	if lease {
+		k.Session = c.lease
+	}
+	opts := &consulAPI.WriteOptions{}
+
+	duration := spanstat.Start()
+	success, _, err := c.KV().CAS(k, opts.WithContext(ctx))
+	increaseMetric(key, metricSet, "CreateOnly", duration.EndError(err).Total(), err)
+	if err != nil {
+		return false, fmt.Errorf("unable to compare-and-swap: %s", err)
+	}
+	return success, nil
+}
+
+// createIfExists creates a key with the value only if key condKey exists
+func (c *consulClient) createIfExists(ctx context.Context, condKey, key string, value []byte, lease bool) error {
+	// Consul does not support transactions which would allow checking for
+	// the presence of a conditional key if the key is not the key being
+	// manipulated
+	//
+	// Lock the conditional key to serialize all CreateIfExists() calls
+
+	l, err := LockPath(ctx, c, condKey)
+	if err != nil {
+		return fmt.Errorf("unable to lock condKey for CreateIfExists: %s", err)
+	}
+
+	defer l.Unlock(context.Background())
+
+	// Create the key if it does not exist
+	if _, err := c.CreateOnly(ctx, key, value, lease); err != nil {
+		return err
+	}
+
+	// Consul does not support transactions which would allow checking for
+	// the presence of another key
+	masterKey, err := c.Get(ctx, condKey)
+	if err != nil || masterKey == nil {
+		c.Delete(ctx, key)
+		return fmt.Errorf("conditional key not present")
+	}
+
+	return nil
+}
+
+// CreateIfExists creates a key with the value only if key condKey exists
+func (c *consulClient) CreateIfExists(ctx context.Context, condKey, key string, value []byte, lease bool) (err error) {
+	defer func() {
+		Trace("CreateIfExists", err, logrus.Fields{fieldKey: key, fieldValue: string(value), fieldCondition: condKey, fieldAttachLease: lease})
+	}()
+
+	duration := spanstat.Start()
+	err = c.createIfExists(ctx, condKey, key, value, lease)
+	increaseMetric(key, metricSet, "CreateIfExists", duration.EndError(err).Total(), err)
+	return err
+}
+
+// ListPrefixIfLocked returns a list of keys matching the prefix only if the client is still holding the given lock.
+func (c *consulClient) ListPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (v KeyValuePairs, err error) {
+	defer func() { Trace("ListPrefixIfLocked", err, logrus.Fields{fieldPrefix: prefix, fieldNumEntries: len(v)}) }()
+	return c.listPrefix(ctx, prefix)
+}
+
+// ListPrefix returns a map of matching keys
+func (c *consulClient) ListPrefix(ctx context.Context, prefix string) (v KeyValuePairs, err error) {
+	defer func() { Trace("ListPrefix", err, logrus.Fields{fieldPrefix: prefix, fieldNumEntries: len(v)}) }()
+	return c.listPrefix(ctx, prefix)
+}
+
+func (c *consulClient) listPrefix(ctx context.Context, prefix string) (KeyValuePairs, error) {
+	duration := spanstat.Start()
+	qo := &consulAPI.QueryOptions{}
+	pairs, _, err := c.KV().List(prefix, qo.WithContext(ctx))
+	increaseMetric(prefix, metricRead, "ListPrefix", duration.EndError(err).Total(), err)
+	if err != nil {
+		return nil, err
+	}
+
+	p := KeyValuePairs(make(map[string]Value, len(pairs)))
+	for i := 0; i < len(pairs); i++ {
+		p[pairs[i].Key] = Value{
+			Data:        pairs[i].Value,
+			ModRevision: pairs[i].ModifyIndex,
+			SessionID:   pairs[i].Session,
+		}
+	}
+
+	return p, nil
+}
+
+// Close closes the consul session
+func (c *consulClient) Close(ctx context.Context) {
+	close(c.statusCheckErrors)
+	if c.controllers != nil {
+		c.controllers.RemoveAll()
+	}
+	if c.lease != "" {
+		c.Session().Destroy(c.lease, nil)
+	}
+}
+
+// GetCapabilities returns the capabilities of the backend
+func (c *consulClient) GetCapabilities() Capabilities {
+	return Capabilities(0)
+}
+
+// Encode encodes a binary slice into a character set that the backend supports
+func (c *consulClient) Encode(in []byte) (out string) {
+	defer func() { Trace("Encode", nil, logrus.Fields{"in": in, "out": out}) }()
+	return base64.URLEncoding.EncodeToString([]byte(in))
+}
+
+// Decode decodes a key previously encoded back into the original binary slice
+func (c *consulClient) Decode(in string) (out []byte, err error) {
+	defer func() { Trace("Decode", err, logrus.Fields{"in": in, "out": out}) }()
+	return base64.URLEncoding.DecodeString(in)
+}
+
+// ListAndWatch implements the BackendOperations.ListAndWatch using consul
+func (c *consulClient) ListAndWatch(ctx context.Context, prefix string, chanSize int) *Watcher {
+	w := newWatcher(prefix, chanSize)
+
+	log.WithField(fieldPrefix, prefix).Debug("Starting watcher...")
+
+	go c.Watch(ctx, w)
+
+	return w
+}
+
+// StatusCheckErrors returns a channel which receives status check errors
+func (c *consulClient) StatusCheckErrors() <-chan error {
+	return c.statusCheckErrors
+}
+
+// RegisterLeaseExpiredObserver is not implemented for the consul backend
+func (c *consulClient) RegisterLeaseExpiredObserver(prefix string, fn func(key string)) {}
+
+// UserEnforcePresence is not implemented for the consul backend
+func (c *consulClient) UserEnforcePresence(ctx context.Context, name string, roles []string) error {
+	return ErrNotImplemented
+}
+
+// UserEnforceAbsence is not implemented for the consul backend
+func (c *consulClient) UserEnforceAbsence(ctx context.Context, name string) error {
+	return ErrNotImplemented
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/doc.go b/vendor/github.com/cilium/cilium/pkg/kvstore/doc.go
new file mode 100644
index 0000000000..b69800a22f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package kvstore abstracts KVstore access and provides a high level API to
+// atomically manage cluster wide resources
+package kvstore
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/dummy.go b/vendor/github.com/cilium/cilium/pkg/kvstore/dummy.go
new file mode 100644
index 0000000000..989c24a42a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/dummy.go
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+	"context"
+	"testing"
+
+	"github.com/cilium/cilium/pkg/inctimer"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+// SetupDummy sets up kvstore for tests. A lock mechanism is used to prevent
+// the creation of two clients at the same time, to avoid interference in case
+// different tests are run in parallel. A cleanup function is automatically
+// registered to delete all keys and close the client when the test terminates.
+func SetupDummy(tb testing.TB, dummyBackend string) {
+	SetupDummyWithConfigOpts(tb, dummyBackend, nil)
+}
+
+// SetupDummyWithConfigOpts sets up the dummy kvstore for tests but also
+// configures the module with the provided opts. A lock mechanism is used to
+// prevent the creation of two clients at the same time, to avoid interference
+// in case different tests are run in parallel. A cleanup function is
+// automatically registered to delete all keys and close the client when the
+// test terminates.
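+//
+// A minimal usage sketch (the test name is hypothetical; the backend name and
+// address match the defaults defined in etcd.go below):
+//
+//	func TestWithDummyEtcd(t *testing.T) {
+//		SetupDummyWithConfigOpts(t, "etcd",
+//			map[string]string{EtcdAddrOption: "http://127.0.0.1:4002"})
+//		// ... exercise Client() against the dummy backend ...
+//	}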
+func SetupDummyWithConfigOpts(tb testing.TB, dummyBackend string, opts map[string]string) {
+	module := getBackend(dummyBackend)
+	if module == nil {
+		tb.Fatalf("Unknown dummy kvstore backend %s", dummyBackend)
+	}
+
+	module.setConfigDummy()
+
+	if opts != nil {
+		err := module.setConfig(opts)
+		if err != nil {
+			tb.Fatalf("Unable to set config options for kvstore backend module: %v", err)
+		}
+	}
+
+	if err := initClient(context.Background(), module, nil); err != nil {
+		tb.Fatalf("Unable to initialize kvstore client: %v", err)
+	}
+
+	tb.Cleanup(func() {
+		if err := Client().DeletePrefix(context.Background(), ""); err != nil {
+			tb.Fatalf("Unable to delete all kvstore keys: %v", err)
+		}
+
+		Client().Close(context.Background())
+	})
+
+	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
+	defer cancel()
+
+	if err := <-Client().Connected(ctx); err != nil {
+		tb.Fatalf("Failed waiting for kvstore connection to be established: %v", err)
+	}
+
+	timer, done := inctimer.New()
+	defer done()
+
+	// go test may run tests belonging to different packages in parallel, so
+	// implement a locking mechanism to ensure that only one of them at a time
+	// can access the kvstore, preventing them from interfering with each
+	// other. Locking is implemented through CreateOnly (rather than using
+	// the locking abstraction), so that we can release it in the same atomic
+	// transaction that also removes all the other keys.
+	for {
+		succeeded, err := Client().CreateOnly(ctx, ".lock", []byte(""), true)
+		if err != nil {
+			tb.Fatalf("Unable to acquire the kvstore lock: %v", err)
+		}
+
+		if succeeded {
+			return
+		}
+
+		select {
+		case <-timer.After(100 * time.Millisecond):
+		case <-ctx.Done():
+			tb.Fatal("Timed out waiting to acquire the kvstore lock")
+		}
+	}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/etcd.go b/vendor/github.com/cilium/cilium/pkg/kvstore/etcd.go
new file mode 100644
index 0000000000..78c68293da
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/etcd.go
@@ -0,0 +1,1894 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+	"go.etcd.io/etcd/api/v3/mvccpb"
+	v3rpcErrors "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+	"go.etcd.io/etcd/client/pkg/v3/logutil"
+	"go.etcd.io/etcd/client/pkg/v3/tlsutil"
+	client "go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/client/v3/concurrency"
+	clientyaml "go.etcd.io/etcd/client/v3/yaml"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"golang.org/x/time/rate"
+	"sigs.k8s.io/yaml"
+
+	"github.com/cilium/cilium/pkg/backoff"
+	"github.com/cilium/cilium/pkg/defaults"
+	"github.com/cilium/cilium/pkg/inctimer"
+	"github.com/cilium/cilium/pkg/lock"
+	"github.com/cilium/cilium/pkg/option"
+	"github.com/cilium/cilium/pkg/rand"
+	ciliumrate "github.com/cilium/cilium/pkg/rate"
+	ciliumratemetrics "github.com/cilium/cilium/pkg/rate/metrics"
+	"github.com/cilium/cilium/pkg/spanstat"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+const (
+	// EtcdBackendName is the backend name for etcd
+	EtcdBackendName = "etcd"
+
+	EtcdAddrOption               = "etcd.address"
+	isEtcdOperatorOption         = "etcd.operator"
+	EtcdOptionConfig             = "etcd.config"
+	EtcdOptionKeepAliveHeartbeat = "etcd.keepaliveHeartbeat"
+	EtcdOptionKeepAliveTimeout   =
"etcd.keepaliveTimeout" + + // EtcdRateLimitOption specifies maximum kv operations per second + EtcdRateLimitOption = "etcd.qps" + + // EtcdMaxInflightOption specifies maximum inflight concurrent kv store operations + EtcdMaxInflightOption = "etcd.maxInflight" + + // EtcdListLimitOption limits the number of results retrieved in one batch + // by ListAndWatch operations. A 0 value equals to no limit. + EtcdListLimitOption = "etcd.limit" + + // etcdMaxKeysPerLease is the maximum number of keys that can be attached to a lease + etcdMaxKeysPerLease = 1000 +) + +var ( + // ErrLockLeaseExpired is an error whenever the lease of the lock does not + // exist or it was expired. + ErrLockLeaseExpired = errors.New("transaction did not succeed: lock lease expired") + + randGen = rand.NewSafeRand(time.Now().UnixNano()) +) + +type etcdModule struct { + opts backendOptions + config *client.Config +} + +var ( + // statusCheckTimeout is the timeout when performing status checks with + // all etcd endpoints + statusCheckTimeout = 10 * time.Second + + // initialConnectionTimeout is the timeout for the initial connection to + // the etcd server + initialConnectionTimeout = 15 * time.Minute + + // etcdDummyAddress can be overwritten from test invokers using ldflags + etcdDummyAddress = "http://127.0.0.1:4002" + + etcdInstance = newEtcdModule() + + // etcd3ClientLogger is the logger used for the underlying etcd clients. We + // explicitly initialize a logger and propagate it to prevent each client from + // automatically creating a new one, which comes with a significant memory cost. + etcd3ClientLogger *zap.Logger +) + +func EtcdDummyAddress() string { + return etcdDummyAddress +} + +func newEtcdModule() backendModule { + return &etcdModule{ + opts: backendOptions{ + isEtcdOperatorOption: &backendOption{ + description: "if the configuration is setting up an etcd-operator", + }, + EtcdAddrOption: &backendOption{ + description: "Addresses of etcd cluster", + }, + EtcdOptionConfig: &backendOption{ + description: "Path to etcd configuration file", + }, + EtcdOptionKeepAliveTimeout: &backendOption{ + description: "Timeout after which an unanswered heartbeat triggers the connection to be closed", + validate: func(v string) error { + _, err := time.ParseDuration(v) + return err + }, + }, + EtcdOptionKeepAliveHeartbeat: &backendOption{ + description: "Heartbeat interval to keep gRPC connection alive", + validate: func(v string) error { + _, err := time.ParseDuration(v) + return err + }, + }, + EtcdRateLimitOption: &backendOption{ + description: "Rate limit in kv store operations per second", + validate: func(v string) error { + _, err := strconv.Atoi(v) + return err + }, + }, + EtcdMaxInflightOption: &backendOption{ + description: "Maximum inflight concurrent kv store operations; defaults to etcd.qps if unset", + validate: func(v string) error { + _, err := strconv.Atoi(v) + return err + }, + }, + EtcdListLimitOption: &backendOption{ + description: "Max number of results retrieved in one batch by ListAndWatch operations (0 = no limit)", + validate: func(v string) error { + _, err := strconv.Atoi(v) + return err + }, + }, + }, + } +} + +func (e *etcdModule) createInstance() backendModule { + return newEtcdModule() +} + +func (e *etcdModule) getName() string { + return EtcdBackendName +} + +func (e *etcdModule) setConfigDummy() { + e.config = &client.Config{} + e.config.Endpoints = []string{etcdDummyAddress} +} + +func (e *etcdModule) setConfig(opts map[string]string) error { + return setOpts(opts, e.opts) +} + +func (e 
*etcdModule) setExtraConfig(opts *ExtraOptions) error { + if opts != nil && len(opts.DialOption) != 0 { + e.config = &client.Config{} + e.config.DialOptions = append(e.config.DialOptions, opts.DialOption...) + } + return nil +} + +func (e *etcdModule) getConfig() map[string]string { + return getOpts(e.opts) +} + +func shuffleEndpoints(endpoints []string) { + randGen.Shuffle(len(endpoints), func(i, j int) { + endpoints[i], endpoints[j] = endpoints[j], endpoints[i] + }) +} + +type clientOptions struct { + KeepAliveHeartbeat time.Duration + KeepAliveTimeout time.Duration + RateLimit int + MaxInflight int + ListBatchSize int +} + +func (e *etcdModule) newClient(ctx context.Context, opts *ExtraOptions) (BackendOperations, chan error) { + errChan := make(chan error, 10) + + clientOptions := clientOptions{ + KeepAliveHeartbeat: 15 * time.Second, + KeepAliveTimeout: 25 * time.Second, + RateLimit: defaults.KVstoreQPS, + ListBatchSize: 256, + } + + if o, ok := e.opts[EtcdRateLimitOption]; ok && o.value != "" { + clientOptions.RateLimit, _ = strconv.Atoi(o.value) + } + + if o, ok := e.opts[EtcdMaxInflightOption]; ok && o.value != "" { + clientOptions.MaxInflight, _ = strconv.Atoi(o.value) + } + + if clientOptions.MaxInflight == 0 { + clientOptions.MaxInflight = clientOptions.RateLimit + } + + if o, ok := e.opts[EtcdListLimitOption]; ok && o.value != "" { + clientOptions.ListBatchSize, _ = strconv.Atoi(o.value) + } + + if o, ok := e.opts[EtcdOptionKeepAliveTimeout]; ok && o.value != "" { + clientOptions.KeepAliveTimeout, _ = time.ParseDuration(o.value) + } + + if o, ok := e.opts[EtcdOptionKeepAliveHeartbeat]; ok && o.value != "" { + clientOptions.KeepAliveHeartbeat, _ = time.ParseDuration(o.value) + } + + endpointsOpt, endpointsSet := e.opts[EtcdAddrOption] + configPathOpt, configSet := e.opts[EtcdOptionConfig] + + var configPath string + if configSet { + configPath = configPathOpt.value + } + if e.config == nil { + if !endpointsSet && !configSet { + errChan <- fmt.Errorf("invalid etcd configuration, %s or %s must be specified", EtcdOptionConfig, EtcdAddrOption) + close(errChan) + return nil, errChan + } + + if endpointsOpt.value == "" && configPath == "" { + errChan <- fmt.Errorf("invalid etcd configuration, %s or %s must be specified", + EtcdOptionConfig, EtcdAddrOption) + close(errChan) + return nil, errChan + } + + e.config = &client.Config{} + } + + if e.config.Endpoints == nil && endpointsSet { + e.config.Endpoints = []string{endpointsOpt.value} + } + + log.WithFields(logrus.Fields{ + "ConfigPath": configPath, + "KeepAliveHeartbeat": clientOptions.KeepAliveHeartbeat, + "KeepAliveTimeout": clientOptions.KeepAliveTimeout, + "RateLimit": clientOptions.RateLimit, + "MaxInflight": clientOptions.MaxInflight, + "ListLimit": clientOptions.ListBatchSize, + }).Info("Creating etcd client") + + for { + // connectEtcdClient will close errChan when the connection attempt has + // been successful + backend, err := connectEtcdClient(ctx, e.config, configPath, errChan, clientOptions, opts) + switch { + case os.IsNotExist(err): + log.WithError(err).Info("Waiting for all etcd configuration files to be available") + time.Sleep(5 * time.Second) + case err != nil: + errChan <- err + close(errChan) + return backend, errChan + default: + return backend, errChan + } + } +} + +func init() { + // register etcd module for use + registerBackend(EtcdBackendName, etcdInstance) + + if duration := os.Getenv("CILIUM_ETCD_STATUS_CHECK_INTERVAL"); duration != "" { + timeout, err := time.ParseDuration(duration) + if err == nil { + 
statusCheckTimeout = timeout
+		}
+	}
+
+	// Initialize the etcd client logger.
+	l, err := logutil.CreateDefaultZapLogger(etcdClientDebugLevel())
+	if err != nil {
+		log.WithError(err).Warning("Failed to initialize etcd client logger")
+		l = zap.NewNop()
+	}
+	etcd3ClientLogger = l.Named("etcd-client")
+}
+
+// etcdClientDebugLevel translates ETCD_CLIENT_DEBUG into zap log level.
+// This is a copy of a private etcd client function:
+// https://github.com/etcd-io/etcd/blob/v3.5.9/client/v3/logger.go#L47-L59
+func etcdClientDebugLevel() zapcore.Level {
+	envLevel := os.Getenv("ETCD_CLIENT_DEBUG")
+	if envLevel == "" || envLevel == "true" {
+		return zapcore.InfoLevel
+	}
+	var l zapcore.Level
+	if err := l.Set(envLevel); err != nil {
+		log.Warning("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. Using default level: 'info'")
+		return zapcore.InfoLevel
+	}
+	return l
+}
+
+// Hint tries to improve the error message displayed to the user.
+func Hint(err error) error {
+	switch err {
+	case context.DeadlineExceeded:
+		return fmt.Errorf("etcd client timeout exceeded")
+	default:
+		return err
+	}
+}
+
+type etcdClient struct {
+	// firstSession is a channel that will be closed once the first session
+	// is set up in the etcd client. If an error occurs and the initial
+	// session cannot be established, the error is made available via
+	// sessionErr.
+	firstSession chan struct{}
+
+	// stopStatusChecker is closed when the status checker can be terminated
+	stopStatusChecker chan struct{}
+
+	client *client.Client
+
+	// config and configPath are initialized once and never written to again;
+	// they can be accessed without locking
+	config     *client.Config
+	configPath string
+
+	// statusCheckErrors receives all errors reported by statusChecker()
+	statusCheckErrors chan error
+
+	// protects all sessions and sessionErr from concurrent access
+	lock.RWMutex
+
+	sessionErr error
+
+	// leaseManager manages the acquisition of etcd leases for generic purposes
+	leaseManager *etcdLeaseManager
+	// lockLeaseManager manages the acquisition of etcd leases for locking
+	// purposes, associated with a shorter TTL
+	lockLeaseManager *etcdLeaseManager
+
+	// statusLock protects latestStatusSnapshot and latestErrorStatus for
+	// read/write access
+	statusLock lock.RWMutex
+
+	// latestStatusSnapshot is a snapshot of the latest etcd cluster status
+	latestStatusSnapshot string
+
+	// latestErrorStatus is the latest error condition of the etcd connection
+	latestErrorStatus error
+
+	extraOptions *ExtraOptions
+
+	limiter       *ciliumrate.APILimiter
+	listBatchSize int
+
+	lastHeartbeat time.Time
+
+	leaseExpiredObservers lock.Map[string, func(string)]
+
+	// logger is the scoped logger associated with this client
+	logger logrus.FieldLogger
+}
+
+type etcdMutex struct {
+	mutex    *concurrency.Mutex
+	onUnlock func()
+}
+
+func (e *etcdMutex) Unlock(ctx context.Context) error {
+	e.onUnlock()
+	return e.mutex.Unlock(ctx)
+}
+
+func (e *etcdMutex) Comparator() interface{} {
+	return e.mutex.IsOwner()
+}
+
+// StatusCheckErrors returns a channel which receives status check errors
+func (e *etcdClient) StatusCheckErrors() <-chan error {
+	return e.statusCheckErrors
+}
+
+func (e *etcdClient) waitForInitLock(ctx context.Context) <-chan error {
+	initLockSucceeded := make(chan error)
+
+	go func() {
+		for {
+			select {
+			case <-e.client.Ctx().Done():
+				initLockSucceeded <- fmt.Errorf("client context ended: %w", e.client.Ctx().Err())
+				close(initLockSucceeded)
+				return
+			case <-ctx.Done():
+				initLockSucceeded <- fmt.Errorf("caller context ended: %w", ctx.Err())
fmt.Errorf("caller context ended: %w", ctx.Err()) + close(initLockSucceeded) + return + default: + } + + if e.extraOptions != nil && e.extraOptions.NoLockQuorumCheck { + close(initLockSucceeded) + return + } + + // Generate a random number so that we can acquire a lock even + // if other agents are killed while locking this path. + randNumber := strconv.FormatUint(randGen.Uint64(), 16) + locker, err := e.LockPath(ctx, InitLockPath+"/"+randNumber) + if err == nil { + locker.Unlock(context.Background()) + close(initLockSucceeded) + e.logger.Debug("Distributed lock successful, etcd has quorum") + return + } + + time.Sleep(100 * time.Millisecond) + } + }() + + return initLockSucceeded +} + +func (e *etcdClient) isConnectedAndHasQuorum(ctx context.Context) error { + ctxTimeout, cancel := context.WithTimeout(ctx, statusCheckTimeout) + defer cancel() + + select { + // Wait for the the initial connection to be established + case <-e.firstSession: + if err := e.sessionError(); err != nil { + return err + } + // Client is closing + case <-e.client.Ctx().Done(): + return fmt.Errorf("client is closing") + // Timeout while waiting for initial connection, no success + case <-ctxTimeout.Done(): + recordQuorumError("timeout") + return fmt.Errorf("timeout while waiting for initial connection") + } + + initLockSucceeded := e.waitForInitLock(ctxTimeout) + if err := <-initLockSucceeded; err != nil { + recordQuorumError("lock timeout") + return fmt.Errorf("unable to acquire lock: %w", err) + } + + return nil +} + +// Connected closes the returned channel when the etcd client is connected. If +// the context is cancelled or if the etcd client is closed, an error is +// returned on the channel. +func (e *etcdClient) Connected(ctx context.Context) <-chan error { + out := make(chan error) + go func() { + defer close(out) + for { + select { + case <-e.client.Ctx().Done(): + out <- fmt.Errorf("etcd client context ended") + return + case <-ctx.Done(): + out <- ctx.Err() + return + default: + } + if e.isConnectedAndHasQuorum(ctx) == nil { + return + } + time.Sleep(100 * time.Millisecond) + } + }() + return out +} + +// Disconnected closes the returned channel when the etcd client is +// disconnected after being reconnected. Blocks until the etcd client is first +// connected with the kvstore. +func (e *etcdClient) Disconnected() <-chan struct{} { + <-e.firstSession + + for { + session, err := e.lockLeaseManager.GetSession(context.Background(), InitLockPath) + if err == nil { + return session.Done() + } + + e.logger.WithError(err).Warning("Failed to acquire lock session") + time.Sleep(100 * time.Millisecond) + } +} + +func connectEtcdClient(ctx context.Context, config *client.Config, cfgPath string, errChan chan error, clientOptions clientOptions, opts *ExtraOptions) (BackendOperations, error) { + if cfgPath != "" { + cfg, err := newConfig(cfgPath) + if err != nil { + return nil, err + } + if cfg.TLS != nil { + cfg.TLS.GetClientCertificate, err = getClientCertificateReloader(cfgPath) + if err != nil { + return nil, err + } + } + cfg.DialOptions = append(cfg.DialOptions, config.DialOptions...) + config = cfg + } + + // Shuffle the order of endpoints to avoid all agents connecting to the + // same etcd endpoint and to work around etcd client library failover + // bugs. 
(https://github.com/etcd-io/etcd/pull/9860) + if config.Endpoints != nil { + shuffleEndpoints(config.Endpoints) + } + + // Set client context so that client can be cancelled from outside + config.Context = ctx + // Set DialTimeout to 0, otherwise the creation of a new client will + // block until DialTimeout is reached or a connection to the server + // is made. + config.DialTimeout = 0 + // Ping the server to verify if the server connection is still valid + config.DialKeepAliveTime = clientOptions.KeepAliveHeartbeat + // Timeout if the server does not reply within 15 seconds and close the + // connection. Ideally it should be lower than staleLockTimeout + config.DialKeepAliveTimeout = clientOptions.KeepAliveTimeout + + // Use the shared etcd client logger to prevent unnecessary allocations. + config.Logger = etcd3ClientLogger + + c, err := client.New(*config) + if err != nil { + return nil, err + } + + errorChan := make(chan error) + + limiter := ciliumrate.NewAPILimiter(makeSessionName("etcd", opts), ciliumrate.APILimiterParameters{ + RateLimit: rate.Limit(clientOptions.RateLimit), + RateBurst: clientOptions.RateLimit, + ParallelRequests: clientOptions.MaxInflight, + }, ciliumratemetrics.APILimiterObserver()) + + ec := &etcdClient{ + client: c, + config: config, + configPath: cfgPath, + firstSession: make(chan struct{}), + latestStatusSnapshot: "Waiting for initial connection to be established", + stopStatusChecker: make(chan struct{}), + extraOptions: opts, + limiter: limiter, + listBatchSize: clientOptions.ListBatchSize, + statusCheckErrors: make(chan error, 128), + logger: log.WithFields(logrus.Fields{ + "endpoints": config.Endpoints, + "config": cfgPath, + }), + } + + ec.logger.Info("Connecting to etcd server...") + + leaseTTL := option.Config.KVstoreLeaseTTL + if option.Config.KVstoreLeaseTTL == 0 { + leaseTTL = defaults.KVstoreLeaseTTL + } + + ec.leaseManager = newEtcdLeaseManager(c, leaseTTL, etcdMaxKeysPerLease, ec.expiredLeaseObserver, ec.logger) + ec.lockLeaseManager = newEtcdLeaseManager(c, defaults.LockLeaseTTL, etcdMaxKeysPerLease, nil, ec.logger) + + // create session in parallel as this is a blocking operation + go func() { + ls, err := ec.lockLeaseManager.GetSession(ctx, InitLockPath) + if err != nil { + errorChan <- err + close(errorChan) + return + } + + log.Infof("Got lock lease ID %x", ls.Lease()) + close(errorChan) + }() + + handleSessionError := func(err error) { + ec.RWMutex.Lock() + ec.sessionErr = err + ec.RWMutex.Unlock() + + ec.statusLock.Lock() + ec.latestStatusSnapshot = "Failed to establish initial connection" + ec.latestErrorStatus = err + ec.statusLock.Unlock() + + errChan <- err + ec.statusCheckErrors <- err + } + + // wait for session to be created also in parallel + go func() { + err := func() (err error) { + select { + case err = <-errorChan: + if err != nil { + return err + } + case <-time.After(initialConnectionTimeout): + return fmt.Errorf("timed out while waiting for etcd session. 
Ensure that etcd is running on %s", config.Endpoints) + } + + ec.logger.Info("Initial etcd session established") + + return nil + }() + + if err != nil { + handleSessionError(err) + close(errChan) + close(ec.firstSession) + close(ec.statusCheckErrors) + return + } + + close(errChan) + close(ec.firstSession) + + go ec.statusChecker() + + watcher := ec.ListAndWatch(ctx, HeartbeatPath, 128) + + for { + select { + case _, ok := <-watcher.Events: + if !ok { + log.Debug("Stopping heartbeat watcher") + watcher.Stop() + return + } + + // It is tempting to compare against the + // heartbeat value stored in the key. However, + // this would require the time on all nodes to + // be synchronized. Instead, assume current + // time and print the heartbeat value in debug + // messages for troubleshooting + ec.RWMutex.Lock() + ec.lastHeartbeat = time.Now() + ec.RWMutex.Unlock() + log.Debug("Received update notification of heartbeat") + case <-ctx.Done(): + return + } + } + }() + + return ec, nil +} + +// makeSessionName builds up a session/locksession controller name +// clusterName is expected to be empty for main kvstore connection +func makeSessionName(sessionPrefix string, opts *ExtraOptions) string { + if opts != nil && opts.ClusterName != "" { + return sessionPrefix + "-" + opts.ClusterName + } + return sessionPrefix +} + +func (e *etcdClient) sessionError() (err error) { + e.RWMutex.RLock() + err = e.sessionErr + e.RWMutex.RUnlock() + return +} + +func (e *etcdClient) LockPath(ctx context.Context, path string) (KVLocker, error) { + // Create the context first, so that the timeout also accounts for the time + // possibly required to acquire a new session (if not already established). + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + + session, err := e.lockLeaseManager.GetSession(ctx, path) + if err != nil { + return nil, Hint(err) + } + + mu := concurrency.NewMutex(session, path) + err = mu.Lock(ctx) + if err != nil { + e.lockLeaseManager.CancelIfExpired(err, session.Lease()) + return nil, Hint(err) + } + + release := func() { e.lockLeaseManager.Release(path) } + return &etcdMutex{mutex: mu, onUnlock: release}, nil +} + +func (e *etcdClient) DeletePrefix(ctx context.Context, path string) (err error) { + defer func() { + Trace("DeletePrefix", err, logrus.Fields{fieldPrefix: path}) + }() + lr, err := e.limiter.Wait(ctx) + if err != nil { + return Hint(err) + } + + defer func(duration *spanstat.SpanStat) { + increaseMetric(path, metricDelete, "DeletePrefix", duration.EndError(err).Total(), err) + }(spanstat.Start()) + + _, err = e.client.Delete(ctx, path, client.WithPrefix()) + // Using lr.Error for convenience, as it matches lr.Done() when err is nil + lr.Error(err) + + if err == nil { + e.leaseManager.ReleasePrefix(path) + } + + return Hint(err) +} + +// Watch starts watching for changes in a prefix +func (e *etcdClient) Watch(ctx context.Context, w *Watcher) { + localCache := watcherCache{} + listSignalSent := false + + defer func() { + close(w.Events) + w.stopWait.Done() + + // The watch might be aborted by closing + // the context instead of calling + // w.Stop() from outside. In that case + // we make sure to close everything and + // as this uses sync.Once it can be + // run multiple times (if that's the case). 
+ w.Stop() + }() + + scopedLog := e.logger.WithField(fieldPrefix, w.Prefix) + scopedLog.Debug("Starting watcher...") + + err := <-e.Connected(ctx) + if err != nil { + // The context ended or the etcd client was closed + // before connectivity was achieved + return + } + + // errLimiter is used to rate limit the retry of the first Get request in case an error + // has occurred, to prevent overloading the etcd server due to the more aggressive + // default rate limiter. + errLimiter := backoff.Exponential{ + Name: "etcd-list-before-watch-error", + Min: 50 * time.Millisecond, + Max: 1 * time.Minute, + } + + if e.extraOptions != nil { + errLimiter.NodeManager = backoff.NewNodeManager(e.extraOptions.ClusterSizeDependantInterval) + } + +reList: + for { + select { + case <-e.client.Ctx().Done(): + return + case <-ctx.Done(): + return + default: + } + + lr, err := e.limiter.Wait(ctx) + if err != nil { + continue + } + kvs, revision, err := e.paginatedList(ctx, scopedLog, w.Prefix) + if err != nil { + lr.Error(err) + scopedLog.WithError(Hint(err)).Warn("Unable to list keys before starting watcher") + errLimiter.Wait(ctx) + continue + } + lr.Done() + errLimiter.Reset() + + for _, key := range kvs { + t := EventTypeCreate + if localCache.Exists(key.Key) { + t = EventTypeModify + } + + localCache.MarkInUse(key.Key) + + if traceEnabled { + scopedLog.Debugf("Emitting list result as %s event for %s=%s", t, key.Key, key.Value) + } + + queueStart := spanstat.Start() + w.Events <- KeyValueEvent{ + Key: string(key.Key), + Value: key.Value, + Typ: t, + } + trackEventQueued(string(key.Key), t, queueStart.End(true).Total()) + } + + nextRev := revision + 1 + + // Send out deletion events for all keys that were deleted + // between our last known revision and the latest revision + // received via Get + localCache.RemoveDeleted(func(k string) { + event := KeyValueEvent{ + Key: k, + Typ: EventTypeDelete, + } + + if traceEnabled { + scopedLog.Debugf("Emitting EventTypeDelete event for %s", k) + } + + queueStart := spanstat.Start() + w.Events <- event + trackEventQueued(k, EventTypeDelete, queueStart.End(true).Total()) + }) + + // Only send the list signal once + if !listSignalSent { + w.Events <- KeyValueEvent{Typ: EventTypeListDone} + listSignalSent = true + } + + recreateWatcher: + scopedLog.WithField(fieldRev, nextRev).Debug("Starting to watch a prefix") + + lr, err = e.limiter.Wait(ctx) + if err != nil { + select { + case <-e.client.Ctx().Done(): + return + case <-ctx.Done(): + return + default: + goto recreateWatcher + } + } + + etcdWatch := e.client.Watch(client.WithRequireLeader(ctx), w.Prefix, + client.WithPrefix(), client.WithRev(nextRev)) + lr.Done() + + for { + select { + case <-e.client.Ctx().Done(): + return + case <-ctx.Done(): + return + case <-w.stopWatch: + return + case r, ok := <-etcdWatch: + if !ok { + time.Sleep(50 * time.Millisecond) + goto recreateWatcher + } + + scopedLog := scopedLog.WithField(fieldRev, r.Header.Revision) + + if err := r.Err(); err != nil { + // We tried to watch on a compacted + // revision that may no longer exist, + // recreate the watcher and try to + // watch on the next possible revision + if errors.Is(err, v3rpcErrors.ErrCompacted) { + scopedLog.WithError(Hint(err)).Debug("Tried watching on compacted revision") + } + + // mark all local keys in state for + // deletion unless the upcoming GET + // marks them alive + localCache.MarkAllForDeletion() + + goto reList + } + + nextRev = r.Header.Revision + 1 + if traceEnabled { + scopedLog.Debugf("Received event from etcd: 
%+v", r) + } + + for _, ev := range r.Events { + event := KeyValueEvent{ + Key: string(ev.Kv.Key), + Value: ev.Kv.Value, + } + + switch { + case ev.Type == client.EventTypeDelete: + event.Typ = EventTypeDelete + localCache.RemoveKey(ev.Kv.Key) + case ev.IsCreate(): + event.Typ = EventTypeCreate + localCache.MarkInUse(ev.Kv.Key) + default: + event.Typ = EventTypeModify + localCache.MarkInUse(ev.Kv.Key) + } + + if traceEnabled { + scopedLog.Debugf("Emitting %s event for %s=%s", event.Typ, event.Key, event.Value) + } + + queueStart := spanstat.Start() + w.Events <- event + trackEventQueued(string(ev.Kv.Key), event.Typ, queueStart.End(true).Total()) + } + } + } + } +} + +func (e *etcdClient) paginatedList(ctx context.Context, log *logrus.Entry, prefix string) (kvs []*mvccpb.KeyValue, revision int64, err error) { + start, end := prefix, client.GetPrefixRangeEnd(prefix) + + for { + res, err := e.client.Get(ctx, start, client.WithRange(end), + client.WithSort(client.SortByKey, client.SortAscend), + client.WithRev(revision), client.WithSerializable(), + client.WithLimit(int64(e.listBatchSize)), + ) + if err != nil { + return nil, 0, err + } + + log.WithFields(logrus.Fields{ + fieldNumEntries: len(res.Kvs), + fieldRemainingEntries: res.Count - int64(len(res.Kvs)), + }).Debug("Received list response from etcd") + + if kvs == nil { + kvs = make([]*mvccpb.KeyValue, 0, res.Count) + } + + kvs = append(kvs, res.Kvs...) + + revision = res.Header.Revision + if !res.More || len(res.Kvs) == 0 { + return kvs, revision, nil + } + + start = string(res.Kvs[len(res.Kvs)-1].Key) + "\x00" + } +} + +func (e *etcdClient) determineEndpointStatus(ctx context.Context, endpointAddress string) (string, error) { + ctxTimeout, cancel := context.WithTimeout(ctx, statusCheckTimeout) + defer cancel() + + e.logger.Debugf("Checking status to etcd endpoint %s", endpointAddress) + + status, err := e.client.Status(ctxTimeout, endpointAddress) + if err != nil { + return fmt.Sprintf("%s - %s", endpointAddress, err), Hint(err) + } + + str := fmt.Sprintf("%s - %s", endpointAddress, status.Version) + if status.Header.MemberId == status.Leader { + str += " (Leader)" + } + + return str, nil +} + +func (e *etcdClient) statusChecker() { + ctx := context.Background() + + consecutiveQuorumErrors := 0 + + statusTimer, statusTimerDone := inctimer.New() + defer statusTimerDone() + + for { + newStatus := []string{} + ok := 0 + + quorumError := e.isConnectedAndHasQuorum(ctx) + + endpoints := e.client.Endpoints() + for _, ep := range endpoints { + st, err := e.determineEndpointStatus(ctx, ep) + if err == nil { + ok++ + } + + newStatus = append(newStatus, st) + } + + allConnected := len(endpoints) == ok + + e.RWMutex.RLock() + lastHeartbeat := e.lastHeartbeat + e.RWMutex.RUnlock() + + if heartbeatDelta := time.Since(lastHeartbeat); !lastHeartbeat.IsZero() && heartbeatDelta > 2*HeartbeatWriteInterval { + recordQuorumError("no event received") + quorumError = fmt.Errorf("%s since last heartbeat update has been received", heartbeatDelta) + } + + quorumString := "true" + if quorumError != nil { + quorumString = quorumError.Error() + consecutiveQuorumErrors++ + quorumString += fmt.Sprintf(", consecutive-errors=%d", consecutiveQuorumErrors) + } else { + consecutiveQuorumErrors = 0 + } + + e.statusLock.Lock() + + switch { + case consecutiveQuorumErrors > option.Config.KVstoreMaxConsecutiveQuorumErrors: + e.latestErrorStatus = fmt.Errorf("quorum check failed %d times in a row: %s", + consecutiveQuorumErrors, quorumError) + e.latestStatusSnapshot = 
e.latestErrorStatus.Error()
+		case len(endpoints) > 0 && ok == 0:
+			e.latestErrorStatus = fmt.Errorf("not able to connect to any etcd endpoints")
+			e.latestStatusSnapshot = e.latestErrorStatus.Error()
+		default:
+			e.latestErrorStatus = nil
+			e.latestStatusSnapshot = fmt.Sprintf("etcd: %d/%d connected, leases=%d, lock leases=%d, has-quorum=%s: %s",
+				ok, len(endpoints), e.leaseManager.TotalLeases(), e.lockLeaseManager.TotalLeases(), quorumString, strings.Join(newStatus, "; "))
+		}
+
+		e.statusLock.Unlock()
+		if e.latestErrorStatus != nil {
+			select {
+			case e.statusCheckErrors <- e.latestErrorStatus:
+			default:
+				// Channel's buffer is full, skip sending errors to the channel but log warnings instead
+				log.WithError(e.latestErrorStatus).
+					Warning("Status check error channel is full, dropping this error")
+			}
+		}
+
+		select {
+		case <-e.stopStatusChecker:
+			close(e.statusCheckErrors)
+			return
+		case <-statusTimer.After(e.extraOptions.StatusCheckInterval(allConnected)):
+		}
+	}
+}
+
+func (e *etcdClient) Status() (string, error) {
+	e.statusLock.RLock()
+	defer e.statusLock.RUnlock()
+
+	return e.latestStatusSnapshot, Hint(e.latestErrorStatus)
+}
+
+// GetIfLocked returns value of key if the client is still holding the given lock.
+func (e *etcdClient) GetIfLocked(ctx context.Context, key string, lock KVLocker) (bv []byte, err error) {
+	defer func() {
+		Trace("GetIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: string(bv)})
+	}()
+	lr, err := e.limiter.Wait(ctx)
+	if err != nil {
+		return nil, Hint(err)
+	}
+	defer func(duration *spanstat.SpanStat) {
+		increaseMetric(key, metricRead, "GetLocked", duration.EndError(err).Total(), err)
+	}(spanstat.Start())
+
+	opGet := client.OpGet(key)
+	cmp := lock.Comparator().(client.Cmp)
+	txnReply, err := e.client.Txn(ctx).If(cmp).Then(opGet).Commit()
+	if err == nil && !txnReply.Succeeded {
+		err = ErrLockLeaseExpired
+	}
+
+	if err != nil {
+		lr.Error(err)
+		return nil, Hint(err)
+	}
+
+	lr.Done()
+	getR := txnReply.Responses[0].GetResponseRange()
+	if getR.Count == 0 {
+		return nil, nil
+	}
+	return getR.Kvs[0].Value, nil
+}
+
+// Get returns value of key
+func (e *etcdClient) Get(ctx context.Context, key string) (bv []byte, err error) {
+	defer func() {
+		Trace("Get", err, logrus.Fields{fieldKey: key, fieldValue: string(bv)})
+	}()
+	lr, err := e.limiter.Wait(ctx)
+	if err != nil {
+		return nil, Hint(err)
+	}
+	defer func(duration *spanstat.SpanStat) {
+		increaseMetric(key, metricRead, "Get", duration.EndError(err).Total(), err)
+	}(spanstat.Start())
+
+	getR, err := e.client.Get(ctx, key)
+	if err != nil {
+		lr.Error(err)
+		return nil, Hint(err)
+	}
+	lr.Done()
+
+	if getR.Count == 0 {
+		return nil, nil
+	}
+	return getR.Kvs[0].Value, nil
+}
+
+// GetPrefixIfLocked returns the first key which matches the prefix and its value if the client is still holding the given lock.
+func (e *etcdClient) GetPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (k string, bv []byte, err error) { + defer func() { + Trace("GetPrefixIfLocked", err, logrus.Fields{fieldPrefix: prefix, fieldKey: k, fieldValue: string(bv)}) + }() + lr, err := e.limiter.Wait(ctx) + if err != nil { + return "", nil, Hint(err) + } + defer func(duration *spanstat.SpanStat) { + increaseMetric(prefix, metricRead, "GetPrefixLocked", duration.EndError(err).Total(), err) + }(spanstat.Start()) + + opGet := client.OpGet(prefix, client.WithPrefix(), client.WithLimit(1)) + cmp := lock.Comparator().(client.Cmp) + txnReply, err := e.client.Txn(ctx).If(cmp).Then(opGet).Commit() + if err == nil && !txnReply.Succeeded { + err = ErrLockLeaseExpired + } + + if err != nil { + lr.Error(err) + return "", nil, Hint(err) + } + lr.Done() + + getR := txnReply.Responses[0].GetResponseRange() + + if getR.Count == 0 { + return "", nil, nil + } + return string(getR.Kvs[0].Key), getR.Kvs[0].Value, nil +} + +// GetPrefix returns the first key which matches the prefix and its value +func (e *etcdClient) GetPrefix(ctx context.Context, prefix string) (k string, bv []byte, err error) { + defer func() { + Trace("GetPrefix", err, logrus.Fields{fieldPrefix: prefix, fieldKey: k, fieldValue: string(bv)}) + }() + lr, err := e.limiter.Wait(ctx) + if err != nil { + return "", nil, Hint(err) + } + defer func(duration *spanstat.SpanStat) { + increaseMetric(prefix, metricRead, "GetPrefix", duration.EndError(err).Total(), err) + }(spanstat.Start()) + + getR, err := e.client.Get(ctx, prefix, client.WithPrefix(), client.WithLimit(1)) + if err != nil { + lr.Error(err) + return "", nil, Hint(err) + } + lr.Done() + + if getR.Count == 0 { + return "", nil, nil + } + return string(getR.Kvs[0].Key), getR.Kvs[0].Value, nil +} + +// Set sets value of key +func (e *etcdClient) Set(ctx context.Context, key string, value []byte) (err error) { + defer func() { + Trace("Set", err, logrus.Fields{fieldKey: key, fieldValue: string(value)}) + }() + lr, err := e.limiter.Wait(ctx) + if err != nil { + return Hint(err) + } + defer func(duration *spanstat.SpanStat) { + increaseMetric(key, metricSet, "Set", duration.EndError(err).Total(), err) + }(spanstat.Start()) + + _, err = e.client.Put(ctx, key, string(value)) + // Using lr.Error for convenience, as it matches lr.Done() when err is nil + lr.Error(err) + return Hint(err) +} + +// DeleteIfLocked deletes a key if the client is still holding the given lock. 
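+// The delete is issued in a transaction guarded by the lock's comparator, so
+// a lost lock surfaces as ErrLockLeaseExpired instead of deleting the key.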
+func (e *etcdClient) DeleteIfLocked(ctx context.Context, key string, lock KVLocker) (err error) { + defer func() { + Trace("DeleteIfLocked", err, logrus.Fields{fieldKey: key}) + }() + lr, err := e.limiter.Wait(ctx) + if err != nil { + return Hint(err) + } + defer func(duration *spanstat.SpanStat) { + increaseMetric(key, metricDelete, "DeleteLocked", duration.EndError(err).Total(), err) + }(spanstat.Start()) + + opDel := client.OpDelete(key) + cmp := lock.Comparator().(client.Cmp) + txnReply, err := e.client.Txn(ctx).If(cmp).Then(opDel).Commit() + if err == nil && !txnReply.Succeeded { + err = ErrLockLeaseExpired + } + if err == nil { + e.leaseManager.Release(key) + } + + // Using lr.Error for convenience, as it matches lr.Done() when err is nil + lr.Error(err) + return Hint(err) +} + +// Delete deletes a key +func (e *etcdClient) Delete(ctx context.Context, key string) (err error) { + defer func() { + Trace("Delete", err, logrus.Fields{fieldKey: key}) + }() + lr, err := e.limiter.Wait(ctx) + if err != nil { + return Hint(err) + } + defer func(duration *spanstat.SpanStat) { + increaseMetric(key, metricDelete, "Delete", duration.EndError(err).Total(), err) + }(spanstat.Start()) + + _, err = e.client.Delete(ctx, key) + // Using lr.Error for convenience, as it matches lr.Done() when err is nil + lr.Error(err) + + if err == nil { + e.leaseManager.Release(key) + } + + return Hint(err) +} + +func (e *etcdClient) createOpPut(key string, value []byte, leaseID client.LeaseID) *client.Op { + if leaseID != 0 { + op := client.OpPut(key, string(value), client.WithLease(leaseID)) + return &op + } + + op := client.OpPut(key, string(value)) + return &op +} + +// UpdateIfLocked updates a key if the client is still holding the given lock. +func (e *etcdClient) UpdateIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (err error) { + defer func() { + Trace("UpdateIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: string(value), fieldAttachLease: lease}) + }() + var leaseID client.LeaseID + if lease { + leaseID, err = e.leaseManager.GetLeaseID(ctx, key) + if err != nil { + return Hint(err) + } + } + lr, err := e.limiter.Wait(ctx) + if err != nil { + return Hint(err) + } + defer func(duration *spanstat.SpanStat) { + increaseMetric(key, metricSet, "UpdateIfLocked", duration.EndError(err).Total(), err) + }(spanstat.Start()) + + var txnReply *client.TxnResponse + opPut := client.OpPut(key, string(value), client.WithLease(leaseID)) + cmp := lock.Comparator().(client.Cmp) + txnReply, err = e.client.Txn(ctx).If(cmp).Then(opPut).Commit() + e.leaseManager.CancelIfExpired(err, leaseID) + + if err == nil && !txnReply.Succeeded { + err = ErrLockLeaseExpired + } + + // Using lr.Error for convenience, as it matches lr.Done() when err is nil + lr.Error(err) + return Hint(err) +} + +// Update creates or updates a key +func (e *etcdClient) Update(ctx context.Context, key string, value []byte, lease bool) (err error) { + defer func() { + Trace("Update", err, logrus.Fields{fieldKey: key, fieldValue: string(value), fieldAttachLease: lease}) + }() + var leaseID client.LeaseID + if lease { + leaseID, err = e.leaseManager.GetLeaseID(ctx, key) + if err != nil { + return Hint(err) + } + } + lr, err := e.limiter.Wait(ctx) + if err != nil { + return Hint(err) + } + defer func(duration *spanstat.SpanStat) { + increaseMetric(key, metricSet, "Update", duration.EndError(err).Total(), err) + }(spanstat.Start()) + + _, err = e.client.Put(ctx, key, string(value), client.WithLease(leaseID)) + 
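// If the error indicates that the attached lease already expired, the lease manager drops its cached lease so that a fresh one is acquired on the next operation.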
e.leaseManager.CancelIfExpired(err, leaseID) + + // Using lr.Error for convenience, as it matches lr.Done() when err is nil + lr.Error(err) + return Hint(err) +} + +// UpdateIfDifferentIfLocked updates a key if the value is different and if the client is still holding the given lock. +func (e *etcdClient) UpdateIfDifferentIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (recreated bool, err error) { + defer func() { + Trace("UpdateIfDifferentIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "recreated": recreated}) + }() + lr, err := e.limiter.Wait(ctx) + if err != nil { + return false, Hint(err) + } + duration := spanstat.Start() + + cnds := lock.Comparator().(client.Cmp) + txnresp, err := e.client.Txn(ctx).If(cnds).Then(client.OpGet(key)).Commit() + // Using lr.Error for convenience, as it matches lr.Done() when err is nil + lr.Error(err) + increaseMetric(key, metricRead, "Get", duration.EndError(err).Total(), err) + + // On error, attempt update blindly + if err != nil { + return true, e.UpdateIfLocked(ctx, key, value, lease, lock) + } + + if !txnresp.Succeeded { + return false, ErrLockLeaseExpired + } + + getR := txnresp.Responses[0].GetResponseRange() + if getR.Count == 0 { + return true, e.UpdateIfLocked(ctx, key, value, lease, lock) + } + + if lease && !e.leaseManager.KeyHasLease(key, client.LeaseID(getR.Kvs[0].Lease)) { + return true, e.UpdateIfLocked(ctx, key, value, lease, lock) + } + // if value is not equal then update. + if !bytes.Equal(getR.Kvs[0].Value, value) { + return true, e.UpdateIfLocked(ctx, key, value, lease, lock) + } + + return false, nil +} + +// UpdateIfDifferent updates a key if the value is different +func (e *etcdClient) UpdateIfDifferent(ctx context.Context, key string, value []byte, lease bool) (recreated bool, err error) { + defer func() { + Trace("UpdateIfDifferent", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "recreated": recreated}) + }() + lr, err := e.limiter.Wait(ctx) + if err != nil { + return false, Hint(err) + } + duration := spanstat.Start() + + getR, err := e.client.Get(ctx, key) + // Using lr.Error for convenience, as it matches lr.Done() when err is nil + lr.Error(err) + increaseMetric(key, metricRead, "Get", duration.EndError(err).Total(), err) + // On error, attempt update blindly + if err != nil || getR.Count == 0 { + return true, e.Update(ctx, key, value, lease) + } + if lease && !e.leaseManager.KeyHasLease(key, client.LeaseID(getR.Kvs[0].Lease)) { + return true, e.Update(ctx, key, value, lease) + } + // if value is not equal then update. 
+	if !bytes.Equal(getR.Kvs[0].Value, value) {
+		return true, e.Update(ctx, key, value, lease)
+	}
+
+	return false, nil
+}
+
+// CreateOnlyIfLocked atomically creates a key if the client is still holding the given lock or fails if it already exists
+func (e *etcdClient) CreateOnlyIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (success bool, err error) {
+	defer func() {
+		Trace("CreateOnlyIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "success": success})
+	}()
+	var leaseID client.LeaseID
+	if lease {
+		leaseID, err = e.leaseManager.GetLeaseID(ctx, key)
+		if err != nil {
+			return false, Hint(err)
+		}
+	}
+	lr, err := e.limiter.Wait(ctx)
+	if err != nil {
+		return false, Hint(err)
+	}
+	duration := spanstat.Start()
+
+	req := e.createOpPut(key, value, leaseID)
+	cnds := []client.Cmp{
+		client.Compare(client.Version(key), "=", 0),
+		lock.Comparator().(client.Cmp),
+	}
+
+	// We need to do a get in the else of the txn to detect if the lock is still
+	// valid or not.
+	opGets := []client.Op{
+		client.OpGet(key),
+	}
+	txnresp, err := e.client.Txn(ctx).If(cnds...).Then(*req).Else(opGets...).Commit()
+	increaseMetric(key, metricSet, "CreateOnlyLocked", duration.EndError(err).Total(), err)
+	if err != nil {
+		lr.Error(err)
+		e.leaseManager.CancelIfExpired(err, leaseID)
+		return false, Hint(err)
+	}
+	lr.Done()
+
+	// The txn can fail for the following reasons:
+	//  - Key version is not zero;
+	//  - Lock does not exist or is expired.
+	// For both of those cases, the key that we are comparing might or might not
+	// exist, so we have:
+	//  A - Key does not exist and lock does not exist => ErrLockLeaseExpired
+	//  B - Key does not exist and lock exists => txn should succeed
+	//  C - Key does exist, version is == 0 and lock does not exist => ErrLockLeaseExpired
+	//  D - Key does exist, version is != 0 and lock does not exist => ErrLockLeaseExpired
+	//  E - Key does exist, version is == 0 and lock does exist => txn should succeed
+	//  F - Key does exist, version is != 0 and lock does exist => txn fails but the returned error is nil!
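+	// The checks below distinguish case F (the key exists with a non-zero
+	// version, as returned by the Get in the Else branch) from cases A, C
+	// and D, where the only possible conclusion is that the lock lease has
+	// expired.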
+
+	if !txnresp.Succeeded {
+		// case F
+		if len(txnresp.Responses[0].GetResponseRange().Kvs) != 0 &&
+			txnresp.Responses[0].GetResponseRange().Kvs[0].Version != 0 {
+			return false, nil
+		}
+
+		// case A, C and D
+		return false, ErrLockLeaseExpired
+	}
+
+	// case B and E
+	return true, nil
+}
+
+// CreateOnly creates a key with the value and will fail if the key already exists
+func (e *etcdClient) CreateOnly(ctx context.Context, key string, value []byte, lease bool) (success bool, err error) {
+	defer func() {
+		Trace("CreateOnly", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "success": success})
+	}()
+	var leaseID client.LeaseID
+	if lease {
+		leaseID, err = e.leaseManager.GetLeaseID(ctx, key)
+		if err != nil {
+			return false, Hint(err)
+		}
+	}
+	lr, err := e.limiter.Wait(ctx)
+	if err != nil {
+		return false, Hint(err)
+	}
+	defer func(duration *spanstat.SpanStat) {
+		increaseMetric(key, metricSet, "CreateOnly", duration.EndError(err).Total(), err)
+	}(spanstat.Start())
+
+	req := e.createOpPut(key, value, leaseID)
+	cond := client.Compare(client.Version(key), "=", 0)
+
+	txnresp, err := e.client.Txn(ctx).If(cond).Then(*req).Commit()
+
+	if err != nil {
+		lr.Error(err)
+		e.leaseManager.CancelIfExpired(err, leaseID)
+		return false, Hint(err)
+	}
+
+	lr.Done()
+	return txnresp.Succeeded, nil
+}
+
+// CreateIfExists creates a key with the value only if key condKey exists
+func (e *etcdClient) CreateIfExists(ctx context.Context, condKey, key string, value []byte, lease bool) (err error) {
+	defer func() {
+		Trace("CreateIfExists", err, logrus.Fields{fieldKey: key, fieldValue: string(value), fieldCondition: condKey, fieldAttachLease: lease})
+	}()
+	var leaseID client.LeaseID
+	if lease {
+		leaseID, err = e.leaseManager.GetLeaseID(ctx, key)
+		if err != nil {
+			return Hint(err)
+		}
+	}
+	lr, err := e.limiter.Wait(ctx)
+	if err != nil {
+		return Hint(err)
+	}
+	duration := spanstat.Start()
+
+	req := e.createOpPut(key, value, leaseID)
+	cond := client.Compare(client.Version(condKey), "!=", 0)
+	txnresp, err := e.client.Txn(ctx).If(cond).Then(*req).Commit()
+
+	increaseMetric(key, metricSet, "CreateIfExists", duration.EndError(err).Total(), err)
+	if err != nil {
+		lr.Error(err)
+		e.leaseManager.CancelIfExpired(err, leaseID)
+		return Hint(err)
+	}
+	lr.Done()
+
+	if !txnresp.Succeeded {
+		return fmt.Errorf("create was unsuccessful")
+	}
+
+	return nil
+}
+
+// FIXME: When we rebase to etcd 3.3
+//
+// DeleteOnZeroCount deletes the key if no matching keys for prefix exist
+//func (e *etcdClient) DeleteOnZeroCount(key, prefix string) error {
+//	txnresp, err := e.client.Txn(context.TODO()).
+//		If(client.Compare(client.Version(prefix).WithPrefix(), "=", 0)).
+//		Then(client.OpDelete(key)).
+//		Commit()
+//	if err != nil {
+//		return err
+//	}
+//
+//	if !txnresp.Succeeded {
+//		return fmt.Errorf("delete was unsuccessful")
+//	}
+//
+//	return nil
+//}
+
+// ListPrefixIfLocked returns a list of keys matching the prefix only if the client is still holding the given lock.
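+//
+// A minimal usage sketch (illustrative, not part of the vendored source),
+// assuming a KVLocker l previously acquired via LockPath:
+//
+//	pairs, err := backend.ListPrefixIfLocked(ctx, "cilium/state/nodes/v1", l)
+//	if errors.Is(err, ErrLockLeaseExpired) {
+//		// the lock was lost; re-acquire it before retrying
+//	}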
+func (e *etcdClient) ListPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (v KeyValuePairs, err error) { + defer func() { + Trace("ListPrefixIfLocked", err, logrus.Fields{fieldPrefix: prefix, fieldNumEntries: len(v)}) + }() + lr, err := e.limiter.Wait(ctx) + if err != nil { + return nil, Hint(err) + } + defer func(duration *spanstat.SpanStat) { + increaseMetric(prefix, metricRead, "ListPrefixLocked", duration.EndError(err).Total(), err) + }(spanstat.Start()) + + opGet := client.OpGet(prefix, client.WithPrefix()) + cmp := lock.Comparator().(client.Cmp) + txnReply, err := e.client.Txn(ctx).If(cmp).Then(opGet).Commit() + if err == nil && !txnReply.Succeeded { + err = ErrLockLeaseExpired + } + if err != nil { + lr.Error(err) + return nil, Hint(err) + } + lr.Done() + getR := txnReply.Responses[0].GetResponseRange() + + pairs := KeyValuePairs(make(map[string]Value, getR.Count)) + for i := int64(0); i < getR.Count; i++ { + pairs[string(getR.Kvs[i].Key)] = Value{ + Data: getR.Kvs[i].Value, + ModRevision: uint64(getR.Kvs[i].ModRevision), + } + + } + + return pairs, nil +} + +// ListPrefix returns a map of matching keys +func (e *etcdClient) ListPrefix(ctx context.Context, prefix string) (v KeyValuePairs, err error) { + defer func() { + Trace("ListPrefix", err, logrus.Fields{fieldPrefix: prefix, fieldNumEntries: len(v)}) + }() + lr, err := e.limiter.Wait(ctx) + if err != nil { + return nil, Hint(err) + } + defer func(duration *spanstat.SpanStat) { + increaseMetric(prefix, metricRead, "ListPrefix", duration.EndError(err).Total(), err) + }(spanstat.Start()) + + getR, err := e.client.Get(ctx, prefix, client.WithPrefix()) + if err != nil { + lr.Error(err) + return nil, Hint(err) + } + lr.Done() + + pairs := KeyValuePairs(make(map[string]Value, getR.Count)) + for i := int64(0); i < getR.Count; i++ { + pairs[string(getR.Kvs[i].Key)] = Value{ + Data: getR.Kvs[i].Value, + ModRevision: uint64(getR.Kvs[i].ModRevision), + LeaseID: getR.Kvs[i].Lease, + } + + } + + return pairs, nil +} + +// Close closes the etcd session +func (e *etcdClient) Close(ctx context.Context) { + close(e.stopStatusChecker) + + if err := e.client.Close(); err != nil { + e.logger.WithError(err).Warning("Failed to close etcd client") + } + + // Wait until all child goroutines spawned by the lease managers have terminated. + e.leaseManager.Wait() + e.lockLeaseManager.Wait() +} + +// GetCapabilities returns the capabilities of the backend +func (e *etcdClient) GetCapabilities() Capabilities { + return Capabilities(CapabilityCreateIfExists) +} + +// Encode encodes a binary slice into a character set that the backend supports +func (e *etcdClient) Encode(in []byte) (out string) { + return string(in) +} + +// Decode decodes a key previously encoded back into the original binary slice +func (e *etcdClient) Decode(in string) (out []byte, err error) { + return []byte(in), nil +} + +// ListAndWatch implements the BackendOperations.ListAndWatch using etcd +func (e *etcdClient) ListAndWatch(ctx context.Context, prefix string, chanSize int) *Watcher { + w := newWatcher(prefix, chanSize) + + go e.Watch(ctx, w) + + return w +} + +// RegisterLeaseExpiredObserver registers a function which is executed when +// the lease associated with a key having the given prefix is detected as expired. +// If the function is nil, the previous observer (if any) is unregistered. 
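+//
+// A sketch of a registration (illustrative prefix):
+//
+//	backend.RegisterLeaseExpiredObserver("cilium/state/nodes/v1", func(key string) {
+//		// the lease attached to key expired: the key is gone and may be re-created
+//	})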
+func (e *etcdClient) RegisterLeaseExpiredObserver(prefix string, fn func(key string)) {
+	if fn == nil {
+		e.leaseExpiredObservers.Delete(prefix)
+	} else {
+		e.leaseExpiredObservers.Store(prefix, fn)
+	}
+}
+
+func (e *etcdClient) expiredLeaseObserver(key string) {
+	e.leaseExpiredObservers.Range(func(prefix string, fn func(string)) bool {
+		if strings.HasPrefix(key, prefix) {
+			fn(key)
+		}
+		return true
+	})
+}
+
+// UserEnforcePresence creates a user in etcd if not already present, and grants the specified roles.
+func (e *etcdClient) UserEnforcePresence(ctx context.Context, name string, roles []string) error {
+	scopedLog := e.logger.WithField(FieldUser, name)
+
+	scopedLog.Debug("Creating user")
+	_, err := e.client.Auth.UserAddWithOptions(ctx, name, "", &client.UserAddOptions{NoPassword: true})
+	if err != nil {
+		if errors.Is(err, v3rpcErrors.ErrUserAlreadyExist) {
+			scopedLog.Debug("User already exists")
+		} else {
+			return err
+		}
+	}
+
+	for _, role := range roles {
+		scopedLog.WithField(FieldRole, role).Debug("Granting role to user")
+
+		_, err := e.client.Auth.UserGrantRole(ctx, name, role)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// UserEnforceAbsence deletes a user from etcd, if present.
+func (e *etcdClient) UserEnforceAbsence(ctx context.Context, name string) error {
+	scopedLog := e.logger.WithField(FieldUser, name)
+
+	scopedLog.Debug("Deleting user")
+	_, err := e.client.Auth.UserDelete(ctx, name)
+	if err != nil {
+		if errors.Is(err, v3rpcErrors.ErrUserNotFound) {
+			scopedLog.Debug("User not found")
+		} else {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// SplitK8sServiceURL returns the service name and namespace for the given address.
+// If the given address is not parseable or is not of the format
+// '<protocol>://<service-name>.<namespace>[.optional]', an error is returned.
+func SplitK8sServiceURL(address string) (string, string, error) {
+	u, err := url.Parse(address)
+	if err != nil {
+		return "", "", err
+	}
+	// typical service name "cilium-etcd-client.kube-system.svc"
+	names := strings.Split(u.Hostname(), ".")
+	if len(names) >= 2 {
+		return names[0], names[1], nil
+	}
+	return "", "",
+		fmt.Errorf("invalid service name. expecting <service-name>.<namespace>[.optional], got: %s", address)
+}
+
+// IsEtcdOperator returns the service name if the configuration is setting up an
+// etcd-operator. If the configuration explicitly states it is configured
+// to connect to an etcd operator, e.g. with etcd.operator=true, the returned
+// service name is the first found within the configuration specified.
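+//
+// A minimal sketch (illustrative values):
+//
+//	svc, ok := IsEtcdOperator(EtcdBackendName, map[string]string{
+//		isEtcdOperatorOption: "true",
+//		EtcdAddrOption:       "https://cilium-etcd-client.kube-system.svc:2379",
+//	}, "kube-system")
+//	// ok is true, and svc is the configured address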
+func IsEtcdOperator(selectedBackend string, opts map[string]string, k8sNamespace string) (string, bool) { + if selectedBackend != EtcdBackendName { + return "", false + } + + isEtcdOperator := strings.ToLower(opts[isEtcdOperatorOption]) == "true" + + fqdnIsEtcdOperator := func(address string) bool { + svcName, ns, err := SplitK8sServiceURL(address) + return err == nil && + svcName == "cilium-etcd-client" && + ns == k8sNamespace + } + + fqdn := opts[EtcdAddrOption] + if len(fqdn) != 0 { + if fqdnIsEtcdOperator(fqdn) || isEtcdOperator { + return fqdn, true + } + return "", false + } + + bm := newEtcdModule() + err := bm.setConfig(opts) + if err != nil { + return "", false + } + etcdConfig := bm.getConfig()[EtcdOptionConfig] + if len(etcdConfig) == 0 { + return "", false + } + + cfg, err := newConfig(etcdConfig) + if err != nil { + log.WithError(err).Error("Unable to read etcd configuration.") + return "", false + } + for _, endpoint := range cfg.Endpoints { + if fqdnIsEtcdOperator(endpoint) || isEtcdOperator { + return endpoint, true + } + } + + return "", false +} + +// newConfig is a wrapper of clientyaml.NewConfig. Since etcd has deprecated +// the `ca-file` field from yamlConfig in v3.4, the clientyaml.NewConfig won't +// read that field from the etcd configuration file making Cilium fail to +// connect to a TLS-enabled etcd server. Since we should have deprecated the +// usage of this field a long time ago, in this galaxy, we will have this +// wrapper function as a workaround which will still use the `ca-file` field to +// avoid users breaking their connectivity to etcd when upgrading Cilium. +// TODO remove this wrapper in cilium >= 1.8 +func newConfig(fpath string) (*client.Config, error) { + cfg, err := clientyaml.NewConfig(fpath) + if err != nil { + return nil, err + } + if cfg.TLS == nil || cfg.TLS.RootCAs != nil { + return cfg, nil + } + + yc := &yamlConfig{} + b, err := os.ReadFile(fpath) + if err != nil { + return nil, err + } + err = yaml.Unmarshal(b, yc) + if err != nil { + return nil, err + } + if yc.InsecureTransport { + return cfg, nil + } + + if yc.CAfile != "" { + cp, err := tlsutil.NewCertPool([]string{yc.CAfile}) + if err != nil { + return nil, err + } + cfg.TLS.RootCAs = cp + } + return cfg, nil +} + +// reload on-disk certificate and key when needed +func getClientCertificateReloader(fpath string) (func(*tls.CertificateRequestInfo) (*tls.Certificate, error), error) { + yc := &yamlKeyPairConfig{} + b, err := os.ReadFile(fpath) + if err != nil { + return nil, err + } + err = yaml.Unmarshal(b, yc) + if err != nil { + return nil, err + } + if yc.Certfile == "" || yc.Keyfile == "" { + return nil, nil + } + reloader := func(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) { + cer, err := tls.LoadX509KeyPair(yc.Certfile, yc.Keyfile) + return &cer, err + } + return reloader, nil +} + +// copy of relevant internal structure fields in go.etcd.io/etcd/clientv3/yaml +// needed to implement certificates reload, not depending on the deprecated +// newconfig/yamlConfig. +type yamlKeyPairConfig struct { + Certfile string `json:"cert-file"` + Keyfile string `json:"key-file"` +} + +// copy of the internal structure in github.com/etcd-io/etcd/clientv3/yaml so we +// can still use the `ca-file` field for one more release. 
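+//
+// A configuration file still using the deprecated field would look roughly
+// like this (illustrative paths):
+//
+//	endpoints:
+//	  - https://192.0.2.10:2379
+//	ca-file: /etc/etcd-secrets/ca.crt
+//	cert-file: /etc/etcd-secrets/client.crt
+//	key-file: /etc/etcd-secrets/client.key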
+type yamlConfig struct {
+	client.Config
+
+	InsecureTransport     bool   `json:"insecure-transport"`
+	InsecureSkipTLSVerify bool   `json:"insecure-skip-tls-verify"`
+	Certfile              string `json:"cert-file"`
+	Keyfile               string `json:"key-file"`
+	TrustedCAfile         string `json:"trusted-ca-file"`
+
+	// CAfile is being deprecated. Use 'TrustedCAfile' instead.
+	// TODO: deprecate this in v4
+	CAfile string `json:"ca-file"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/etcd_lease.go b/vendor/github.com/cilium/cilium/pkg/kvstore/etcd_lease.go
new file mode 100644
index 0000000000..2e0a2f6d97
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/etcd_lease.go
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+	"context"
+	"errors"
+	"strings"
+	"sync"
+
+	"github.com/sirupsen/logrus"
+	v3rpcErrors "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+	client "go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/client/v3/concurrency"
+
+	"github.com/cilium/cilium/pkg/lock"
+	"github.com/cilium/cilium/pkg/spanstat"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+type leaseInfo struct {
+	count   uint32
+	session *concurrency.Session
+}
+
+// etcdLeaseManager manages the acquisition of the leases, and keeps track of
+// which lease is attached to which etcd key.
+type etcdLeaseManager struct {
+	client *client.Client
+	log    logrus.FieldLogger
+
+	ttl     time.Duration
+	limit   uint32
+	expired func(key string)
+
+	mu      lock.RWMutex
+	leases  map[client.LeaseID]*leaseInfo
+	keys    map[string]client.LeaseID
+	current client.LeaseID
+
+	acquiring chan struct{}
+	wg        sync.WaitGroup
+}
+
+// newEtcdLeaseManager builds and returns a new lease manager instance.
+func newEtcdLeaseManager(cl *client.Client, ttl time.Duration, limit uint32, expired func(key string), log logrus.FieldLogger) *etcdLeaseManager {
+	return &etcdLeaseManager{
+		client: cl,
+		log:    log,
+
+		ttl:     ttl,
+		limit:   limit,
+		expired: expired,
+
+		current: client.NoLease,
+		leases:  make(map[client.LeaseID]*leaseInfo),
+		keys:    make(map[string]client.LeaseID),
+	}
+}
+
+// GetLeaseID returns a lease ID, and associates it to the given key. It leverages
+// one of the already acquired leases if they are not already attached to too many
+// keys, otherwise a new one is acquired.
+//
+// There's a small possibility that the returned lease is already expired, or gets
+// expired immediately before use (due to the time window between the lease expiration
+// on the etcd server and the subsequent client-side detection and garbage collection).
+// As we cannot completely remove this uncertainty period, let's adopt the easiest
+// approach here, without explicitly checking if the lease is expired before returning
+// it (given that it would be a client-side check only). Instead, let's just rely on
+// the fact that the operation will fail (as the lease is no longer valid), triggering
+// a retry. At that point, a new (hopefully valid) lease will be retrieved again.
+func (elm *etcdLeaseManager) GetLeaseID(ctx context.Context, key string) (client.LeaseID, error) {
+	session, err := elm.GetSession(ctx, key)
+	if err != nil {
+		return client.NoLease, err
+	}
+
+	return session.Lease(), nil
+}
+
+// GetSession returns a session, and associates it to the given key. It leverages
+// one of the already acquired leases if they are not already attached to too many
+// keys, otherwise a new one is acquired.
+//
+// There's a small possibility that the returned session is already expired, or gets
+// expired immediately before use (due to the time window between the lease expiration
+// on the etcd server and the subsequent client-side detection and garbage collection).
+// As we cannot completely remove this uncertainty period, let's adopt the easiest
+// approach here, without explicitly checking if the session is expired before returning
+// it (given that it would be a client-side check only). Instead, let's just rely on
+// the fact that the operation will fail (as the lease is no longer valid), triggering
+// a retry. At that point, a new (hopefully valid) session will be retrieved again.
+func (elm *etcdLeaseManager) GetSession(ctx context.Context, key string) (*concurrency.Session, error) {
+	elm.mu.Lock()
+
+	// This key is already attached to a lease, hence just return it.
+	if leaseID := elm.keys[key]; leaseID != client.NoLease {
+		// The entry is guaranteed to exist if the lease is associated with a key
+		info := elm.leases[leaseID]
+		elm.mu.Unlock()
+		return info.session, nil
+	}
+
+	// Return the current lease if it has not been used more than limit times
+	if info := elm.leases[elm.current]; info != nil && info.count < elm.limit {
+		info.count++
+		elm.keys[key] = elm.current
+		elm.mu.Unlock()
+
+		return info.session, nil
+	}
+
+	// Otherwise, loop through the other known leases to see if any has been released
+	for lease, info := range elm.leases {
+		if info.count < elm.limit {
+			elm.current = lease
+			info.count++
+			elm.keys[key] = elm.current
+			elm.mu.Unlock()
+
+			return info.session, nil
+		}
+	}
+
+	// If none is found, we need to acquire a new lease. acquiring is a channel
+	// used to detect whether we are already in the process of acquiring a new
+	// lease, to prevent multiple acquisitions in parallel.
+	acquiring := elm.acquiring
+	if acquiring == nil {
+		elm.acquiring = make(chan struct{})
+	}
+
+	// Unlock, so that we don't block other parallel operations (e.g., releases)
+	// while acquiring a new lease, since it might be a slow operation.
+	elm.mu.Unlock()
+
+	// Someone else is already acquiring a new lease. Wait until
+	// it completes, and then retry again.
+	if acquiring != nil {
+		select {
+		case <-acquiring:
+			return elm.GetSession(ctx, key)
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		case <-elm.client.Ctx().Done():
+			return nil, elm.client.Ctx().Err()
+		}
+	}
+
+	// Otherwise, we can proceed to acquire a new lease.
+	session, err := elm.newSession(ctx)
+
+	elm.mu.Lock()
+
+	// Signal that the acquisition process has completed.
+	close(elm.acquiring)
+	elm.acquiring = nil
+
+	if err != nil {
+		elm.mu.Unlock()
+		return nil, err
+	}
+
+	elm.current = session.Lease()
+	elm.leases[session.Lease()] = &leaseInfo{session: session}
+	elm.mu.Unlock()
+
+	return elm.GetSession(ctx, key)
+}
+
+// Release decrements the counter of the lease attached to the given key.
+func (elm *etcdLeaseManager) Release(key string) {
+	elm.mu.Lock()
+	defer elm.mu.Unlock()
+
+	elm.releaseUnlocked(key)
+}
+
+// ReleasePrefix decrements the counter of the leases attached to the keys
+// starting with the given prefix.
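+//
+// A sketch (illustrative): once every key below a prefix has been deleted
+// from etcd, the corresponding lease bookkeeping is dropped in a single call:
+//
+//	elm.ReleasePrefix("cilium/state/identities/v1/")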
+func (elm *etcdLeaseManager) ReleasePrefix(prefix string) {
+	elm.mu.Lock()
+	defer elm.mu.Unlock()
+
+	for key, leaseID := range elm.keys {
+		if strings.HasPrefix(key, prefix) {
+			if info := elm.leases[leaseID]; info != nil && info.count > 0 {
+				info.count--
+			}
+			delete(elm.keys, key)
+		}
+	}
+}
+
+// KeyHasLease returns whether the given key is associated with the specified lease.
+func (elm *etcdLeaseManager) KeyHasLease(key string, leaseID client.LeaseID) bool {
+	elm.mu.RLock()
+	defer elm.mu.RUnlock()
+
+	return elm.keys[key] == leaseID
+}
+
+// CancelIfExpired verifies whether the error reports that the given lease has
+// expired, and in that case aborts the corresponding keepalive process.
+func (elm *etcdLeaseManager) CancelIfExpired(err error, leaseID client.LeaseID) {
+	if errors.Is(err, v3rpcErrors.ErrLeaseNotFound) {
+		elm.mu.Lock()
+		if info := elm.leases[leaseID]; info != nil {
+			info.session.Orphan()
+		}
+		elm.mu.Unlock()
+	}
+}
+
+// TotalLeases returns the number of managed leases.
+func (elm *etcdLeaseManager) TotalLeases() uint32 {
+	elm.mu.RLock()
+	defer elm.mu.RUnlock()
+
+	return uint32(len(elm.leases))
+}
+
+// Wait waits until all child goroutines have terminated.
+func (elm *etcdLeaseManager) Wait() {
+	elm.wg.Wait()
+}
+
+func (elm *etcdLeaseManager) newSession(ctx context.Context) (session *concurrency.Session, err error) {
+	defer func(duration *spanstat.SpanStat) {
+		increaseMetric("lease", metricSet, "AcquireLease", duration.EndError(err).Total(), err)
+	}(spanstat.Start())
+	resp, err := elm.client.Grant(ctx, int64(elm.ttl.Seconds()))
+	if err != nil {
+		return nil, err
+	}
+	leaseID := resp.ID
+
+	// Construct the session specifying the lease just acquired. This allows us
+	// to split the possibly blocking operation (i.e., lease acquisition) from the
+	// non-blocking one (i.e., the setup of the keepalive logic), so that we can use
+	// different contexts. We want the lease acquisition to be controlled by the
+	// context associated with the given request, while the keepalive process should
+	// continue until either the etcd client is closed or the session is orphaned.
+	session, err = concurrency.NewSession(elm.client,
+		concurrency.WithLease(leaseID),
+		concurrency.WithTTL(int(elm.ttl.Seconds())),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	elm.wg.Add(1)
+	go elm.waitForExpiration(session)
+
+	elm.log.WithFields(logrus.Fields{
+		"LeaseID": leaseID,
+		"TTL":     elm.ttl,
+	}).Info("New lease successfully acquired")
+	return session, nil
+}
+
+func (elm *etcdLeaseManager) waitForExpiration(session *concurrency.Session) {
+	defer elm.wg.Done()
+
+	// Block until the session gets orphaned, either because it fails to be
+	// renewed or the etcd client is closed.
+	<-session.Done()
+
+	select {
+	case <-elm.client.Ctx().Done():
+		// The context of the etcd client was closed
+		return
+	default:
+	}
+
+	elm.log.WithField("LeaseID", session.Lease()).Warning("Lease expired")
+
+	elm.mu.Lock()
+	delete(elm.leases, session.Lease())
+
+	var keys []string
+	for key, id := range elm.keys {
+		if id == session.Lease() {
+			keys = append(keys, key)
+			delete(elm.keys, key)
+		}
+	}
+	elm.mu.Unlock()
+
+	if elm.expired != nil {
+		for _, key := range keys {
+			elm.expired(key)
+		}
+	}
+}
+
+func (elm *etcdLeaseManager) releaseUnlocked(key string) {
+	leaseID := elm.keys[key]
+	if leaseID != client.NoLease {
+		if info := elm.leases[leaseID]; info != nil && info.count > 0 {
+			info.count--
+		}
+		delete(elm.keys, key)
+	}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/events.go b/vendor/github.com/cilium/cilium/pkg/kvstore/events.go
new file mode 100644
index 0000000000..56c71b4c5f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/events.go
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+	"sync"
+)
+
+// EventType defines the type of watch event that occurred
+type EventType int
+
+const (
+	// EventTypeCreate represents a newly created key
+	EventTypeCreate EventType = iota
+	// EventTypeModify represents a modified key
+	EventTypeModify
+	// EventTypeDelete represents a deleted key
+	EventTypeDelete
+	// EventTypeListDone signals that the initial list operation has completed
+	EventTypeListDone
+)
+
+// String returns the human-readable format of an event type
+func (t EventType) String() string {
+	switch t {
+	case EventTypeCreate:
+		return "create"
+	case EventTypeModify:
+		return "modify"
+	case EventTypeDelete:
+		return "delete"
+	case EventTypeListDone:
+		return "listDone"
+	default:
+		return "unknown"
+	}
+}
+
+// KeyValueEvent is a change event for a Key/Value pair
+type KeyValueEvent struct {
+	// Typ is the type of event { EventTypeCreate | EventTypeModify | EventTypeDelete | EventTypeListDone }
+	Typ EventType
+
+	// Key is the kvstore key that changed
+	Key string
+
+	// Value is the kvstore value associated with the key
+	Value []byte
+}
+
+// EventChan is a channel to receive events on
+type EventChan chan KeyValueEvent
+
+// stopChan is the channel used to indicate stopping of the watcher
+type stopChan chan struct{}
+
+// Watcher represents a KVstore watcher
+type Watcher struct {
+	// Events is the channel to which change notifications will be sent
+	Events EventChan `json:"-"`
+
+	Prefix    string `json:"prefix"`
+	stopWatch stopChan
+
+	// stopOnce guarantees that Stop() is only called once
+	stopOnce sync.Once
+
+	// stopWait is the wait group to wait for watchers to exit gracefully
+	stopWait sync.WaitGroup
+}
+
+func newWatcher(prefix string, chanSize int) *Watcher {
+	w := &Watcher{
+		Prefix:    prefix,
+		Events:    make(EventChan, chanSize),
+		stopWatch: make(stopChan),
+	}
+
+	w.stopWait.Add(1)
+
+	return w
+}
+
+// Stop stops a watcher previously created and started with Watch()
+func (w *Watcher) Stop() {
+	w.stopOnce.Do(func() {
+		close(w.stopWatch)
+		log.WithField(fieldPrefix, w.Prefix).Debug("Stopped watcher")
+		w.stopWait.Wait()
+	})
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/kvstore.go b/vendor/github.com/cilium/cilium/pkg/kvstore/kvstore.go
new file mode 100644
index 0000000000..12f24de538
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/kvstore.go
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+	"strings"
+
+	"github.com/cilium/cilium/pkg/time"
+)
+
+// Value is an abstraction of the data stored in the kvstore as well as the
+// mod revision of that data.
+type Value struct {
+	Data        []byte
+	ModRevision uint64
+	LeaseID     int64
+	SessionID   string
+}
+
+// KeyValuePairs is a map of key=value pairs
+type KeyValuePairs map[string]Value
+
+// Capabilities is a bitmask to indicate the capabilities of a backend
+type Capabilities uint32
+
+const (
+	// CapabilityCreateIfExists is true if CreateIfExists is functional
+	CapabilityCreateIfExists Capabilities = 1 << 0
+
+	// CapabilityDeleteOnZeroCount is true if DeleteOnZeroCount is functional
+	CapabilityDeleteOnZeroCount Capabilities = 1 << 1
+
+	// BaseKeyPrefix is the base prefix that should be used for all keys
+	BaseKeyPrefix = "cilium"
+
+	// InitLockPath is the path to the init lock to test quorum
+	InitLockPath = BaseKeyPrefix + "/.initlock"
+
+	// HeartbeatPath is the path to the key at which the operator updates
+	// the heartbeat
+	HeartbeatPath = BaseKeyPrefix + "/.heartbeat"
+
+	// HasClusterConfigPath is the path to the key used to convey that the cluster
+	// configuration will be eventually created, and remote cilium agents shall
+	// wait until it is present. If this key is not set, the cilium configuration
+	// might, or might not, be configured, but the agents will continue regardless,
+	// falling back to the backward compatible behavior. It must be set before
+	// the agents have the possibility to connect to the kvstore (that is, when
+	// it is not yet exposed). The corresponding value is ignored.
+	HasClusterConfigPath = BaseKeyPrefix + "/.has-cluster-config"
+
+	// ClusterConfigPrefix is the kvstore prefix to cluster configuration
+	ClusterConfigPrefix = BaseKeyPrefix + "/cluster-config"
+
+	// SyncedPrefix is the kvstore prefix used to convey whether
+	// synchronization from an external source has completed for a given prefix
+	SyncedPrefix = BaseKeyPrefix + "/synced"
+
+	// HeartbeatWriteInterval is the interval in which the heartbeat key at
+	// HeartbeatPath is updated
+	HeartbeatWriteInterval = time.Minute
+)
+
+// StateToCachePrefix converts a kvstore prefix starting with "cilium/state"
+// (holding the cilium state) to the corresponding one holding cached information
+// from another kvstore (that is, "cilium/cache").
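+//
+// For example:
+//
+//	StateToCachePrefix("cilium/state/identities/v1") // "cilium/cache/identities/v1"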
+func StateToCachePrefix(prefix string) string {
+	if strings.HasPrefix(prefix, "cilium/state") {
+		return strings.Replace(prefix, "cilium/state", "cilium/cache", 1)
+	}
+	return prefix
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/lock.go b/vendor/github.com/cilium/cilium/pkg/kvstore/lock.go
new file mode 100644
index 0000000000..5e72ba597e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/lock.go
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/google/uuid"
+	"github.com/sirupsen/logrus"
+
+	"github.com/cilium/cilium/pkg/debug"
+	"github.com/cilium/cilium/pkg/defaults"
+	"github.com/cilium/cilium/pkg/inctimer"
+	"github.com/cilium/cilium/pkg/lock"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+var (
+	kvstoreLocks = pathLocks{lockPaths: map[string]lockOwner{}}
+
+	// staleLockTimeout is the timeout after which we give up waiting for
+	// another presumed local user of the same lock and ask etcd directly.
+	// It is still highly unlikely that concurrent access occurs, as only
+	// one consumer will manage to acquire the newly released lock. The
+	// only possibility of concurrent access is if a consumer is *still*
+	// holding the lock, but this is highly unlikely given the duration of
+	// this timeout.
+	staleLockTimeout = defaults.KVStoreStaleLockTimeout
+)
+
+type KVLocker interface {
+	Unlock(ctx context.Context) error
+	// Comparator returns an object that should be used by the KVStore to
+	// verify whether the lock is still valid for its client, or nil if no
+	// such verification exists.
+	Comparator() interface{}
+}
+
+// getLockPath returns the lock path representation of the given path.
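+// For example, getLockPath("cilium/state/nodes/v1/foo") returns
+// "cilium/state/nodes/v1/foo.lock".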
+func getLockPath(path string) string {
+	return path + ".lock"
+}
+
+type lockOwner struct {
+	created time.Time
+	id      uuid.UUID
+}
+
+type pathLocks struct {
+	mutex     lock.RWMutex
+	lockPaths map[string]lockOwner
+}
+
+func init() {
+	debug.RegisterStatusObject("kvstore-locks", &kvstoreLocks)
+}
+
+// DebugStatus implements debug.StatusObject to provide debug status collection
+// ability
+func (pl *pathLocks) DebugStatus() string {
+	pl.mutex.RLock()
+	str := spew.Sdump(pl.lockPaths)
+	pl.mutex.RUnlock()
+	return str
+}
+
+func (pl *pathLocks) runGC() {
+	pl.mutex.Lock()
+	for path, owner := range pl.lockPaths {
+		if time.Since(owner.created) > staleLockTimeout {
+			log.WithField("path", path).Error("Forcefully unlocking local kvstore lock")
+			delete(pl.lockPaths, path)
+		}
+	}
+	pl.mutex.Unlock()
+}
+
+func (pl *pathLocks) lock(ctx context.Context, path string) (id uuid.UUID, err error) {
+	lockTimer, lockTimerDone := inctimer.New()
+	defer lockTimerDone()
+	for {
+		pl.mutex.Lock()
+		if _, ok := pl.lockPaths[path]; !ok {
+			id = uuid.New()
+			pl.lockPaths[path] = lockOwner{
+				created: time.Now(),
+				id:      id,
+			}
+			pl.mutex.Unlock()
+			return
+		}
+		pl.mutex.Unlock()
+
+		select {
+		case <-lockTimer.After(10 * time.Millisecond):
+		case <-ctx.Done():
+			err = fmt.Errorf("lock was cancelled: %w", ctx.Err())
+			return
+		}
+	}
+}
+
+func (pl *pathLocks) unlock(path string, id uuid.UUID) {
+	pl.mutex.Lock()
+	if owner, ok := pl.lockPaths[path]; ok && owner.id == id {
+		delete(pl.lockPaths, path)
+	}
+	pl.mutex.Unlock()
+}
+
+// Lock is a lock returned by LockPath
+type Lock struct {
+	path   string
+	id     uuid.UUID
+	kvLock KVLocker
+}
+
+// LockPath locks the specified path. The key for the lock is not the path
+// provided itself but the path with a suffix of ".lock" appended. The lock
+// returned also holds a path-specific local mutex.
+//
+// It is required to call Unlock() on the returned Lock to unlock
+func LockPath(ctx context.Context, backend BackendOperations, path string) (l *Lock, err error) {
+	id, err := kvstoreLocks.lock(ctx, path)
+	if err != nil {
+		return nil, err
+	}
+
+	lock, err := backend.LockPath(ctx, path)
+	if err != nil {
+		kvstoreLocks.unlock(path, id)
+		Trace("Failed to lock", err, logrus.Fields{fieldKey: path})
+		err = fmt.Errorf("error while locking path %s: %w", path, err)
+		return nil, err
+	}
+
+	Trace("Successful lock", err, logrus.Fields{fieldKey: path})
+	return &Lock{kvLock: lock, path: path, id: id}, err
+}
+
+// RunLockGC inspects all local kvstore locks to determine whether they have
+// been held longer than the stale lock timeout, and if so, unlocks them
+// forcibly.
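+//
+// A sketch of a periodic invocation (illustrative; the interval is an
+// assumption, not mandated by this package):
+//
+//	go func() {
+//		for {
+//			time.Sleep(time.Minute)
+//			RunLockGC()
+//		}
+//	}()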
+func RunLockGC() {
+	kvstoreLocks.runGC()
+}
+
+// Unlock unlocks a lock
+func (l *Lock) Unlock(ctx context.Context) error {
+	if l == nil {
+		return nil
+	}
+
+	// Unlock kvstore mutex first
+	err := l.kvLock.Unlock(ctx)
+	if err != nil {
+		log.WithError(err).WithField("path", l.path).Error("Unable to unlock kvstore lock")
+	}
+
+	// unlock local lock even if kvstore cannot be unlocked
+	kvstoreLocks.unlock(l.path, l.id)
+	Trace("Unlocked", nil, logrus.Fields{fieldKey: l.path})
+
+	return err
+}
+
+func (l *Lock) Comparator() interface{} {
+	return l.kvLock.Comparator()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/logfields.go b/vendor/github.com/cilium/cilium/pkg/kvstore/logfields.go
new file mode 100644
index 0000000000..cf6b9792a8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/logfields.go
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+	"github.com/cilium/cilium/pkg/logging"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "kvstore")
+
+const (
+	// fieldKVStoreModule is the name of the kvstore backend (etcd or consul)
+	fieldKVStoreModule = "module"
+
+	// fieldRev is the revision of the key
+	fieldRev = "revision"
+
+	// fieldPrefix is the prefix of the key used in the operation
+	fieldPrefix = "prefix"
+
+	// fieldKey is the key used in the operation
+	fieldKey = "key"
+
+	// fieldValue is the value of the key used in the operation
+	fieldValue = "value"
+
+	// fieldCondition is the condition that must be met
+	fieldCondition = "condition"
+
+	// fieldNumEntries is the number of entries in the result
+	fieldNumEntries = "numEntries"
+
+	// fieldRemainingEntries is the number of entries still to be retrieved
+	fieldRemainingEntries = "remainingEntries"
+
+	// fieldAttachLease is true if the key must be attached to a lease
+	fieldAttachLease = "attachLease"
+
+	// FieldUser identifies a user in the kvstore
+	FieldUser = logfields.User
+
+	// FieldRole identifies a role in the kvstore
+	FieldRole = "role"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/metrics.go b/vendor/github.com/cilium/cilium/pkg/kvstore/metrics.go
new file mode 100644
index 0000000000..4f09efd734
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/metrics.go
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/cilium/cilium/pkg/metrics"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+const (
+	metricDelete = "delete"
+	metricRead   = "read"
+	metricSet    = "set"
+)
+
+// GetScopeFromKey derives the scope label used by the kvstore metrics from
+// the given key: the third and fourth path segments (e.g., "nodes/v1" for
+// "cilium/state/nodes/v1/..."). Keys with fewer than four segments are
+// truncated to at most 12 characters.
+func GetScopeFromKey(key string) string {
+	s := strings.SplitN(key, "/", 5)
+	if len(s) < 4 {
+		if len(key) >= 12 {
+			return key[:12]
+		}
+		return key
+	}
+	return fmt.Sprintf("%s/%s", s[2], s[3])
+}
+
+func increaseMetric(key, kind, action string, duration time.Duration, err error) {
+	if !metrics.KVStoreOperationsDuration.IsEnabled() {
+		return
+	}
+	namespace := GetScopeFromKey(key)
+	outcome := metrics.Error2Outcome(err)
+	metrics.KVStoreOperationsDuration.
+		WithLabelValues(namespace, kind, action, outcome).Observe(duration.Seconds())
+}
+
+func trackEventQueued(key string, typ EventType, duration time.Duration) {
+	if !metrics.KVStoreEventsQueueDuration.IsEnabled() {
+		return
+	}
+	metrics.KVStoreEventsQueueDuration.WithLabelValues(GetScopeFromKey(key), typ.String()).Observe(duration.Seconds())
+}
+
+func recordQuorumError(err string) {
+	if !metrics.KVStoreQuorumErrors.IsEnabled() {
+		return
+	}
+	metrics.KVStoreQuorumErrors.WithLabelValues(err).Inc()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/cell.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/cell.go
new file mode 100644
index 0000000000..238c641d4a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/cell.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package store
+
+import (
+	"github.com/cilium/cilium/pkg/hive/cell"
+)
+
+var Cell = cell.Module(
+	"kvstore-utils",
+	"Provides factory for kvstore related synchronizers",
+
+	cell.Provide(NewFactory),
+
+	cell.Metric(MetricsProvider),
+)
+
+// Factory is the interface to create kvstore-backed synchronizers.
+type Factory interface {
+	NewSyncStore(clusterName string, backend SyncStoreBackend, prefix string, opts ...WSSOpt) SyncStore
+	NewWatchStore(clusterName string, keyCreator KeyCreator, observer Observer, opts ...RWSOpt) WatchStore
+	NewWatchStoreManager(backend WatchStoreBackend, clusterName string) WatchStoreManager
+}
+
+type factoryImpl struct {
+	metrics *Metrics
+}
+
+func (w *factoryImpl) NewSyncStore(clusterName string, backend SyncStoreBackend, prefix string, opts ...WSSOpt) SyncStore {
+	return newWorkqueueSyncStore(clusterName, backend, prefix, w.metrics, opts...)
+}
+
+func (w *factoryImpl) NewWatchStore(clusterName string, keyCreator KeyCreator, observer Observer, opts ...RWSOpt) WatchStore {
+	return newRestartableWatchStore(clusterName, keyCreator, observer, w.metrics, opts...)
+}
+
+func (w *factoryImpl) NewWatchStoreManager(backend WatchStoreBackend, clusterName string) WatchStoreManager {
+	return newWatchStoreManagerSync(backend, clusterName, w)
+}
+
+func NewFactory(storeMetrics *Metrics) Factory {
+	return &factoryImpl{
+		metrics: storeMetrics,
+	}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/doc.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/doc.go
new file mode 100644
index 0000000000..7800652933
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/doc.go
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package store implements a shared store backed by a kvstore or similar with
+// the following properties:
+//
+//   - A single type is used to represent all keys
+//   - Any number of collaborators can join the store. Typically a collaborator
+//     is an individual Cilium agent running on each node.
+//   - All collaborators can own and contribute keys to the store. Each key is
+//     owned by exactly one collaborator. It is the responsibility of each
+//     collaborator to pick a key name which is guaranteed to be unique.
+//   - All collaborators desire to see all keys within the scope of a store. The
+//     scope of the store is defined by a common key prefix. For this purpose,
+//     each collaborator maintains a local cache of all keys in the store by
+//     subscribing to change events.
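+//
+// A minimal collaborator sketch (illustrative, error handling omitted):
+//
+//	s, _ := JoinSharedStore(Configuration{
+//		Prefix:     "cilium/state/example/v1",
+//		KeyCreator: KVPairCreator,
+//	})
+//	_ = s.UpdateLocalKeySync(context.Background(), NewKVPair("node1", "payload"))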
+package store
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/metrics.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/metrics.go
new file mode 100644
index 0000000000..171436f0d7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/metrics.go
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package store
+
+import (
+	"github.com/cilium/cilium/pkg/metrics"
+	"github.com/cilium/cilium/pkg/metrics/metric"
+)
+
+type Metrics struct {
+	KVStoreSyncQueueSize        metric.Vec[metric.Gauge]
+	KVStoreSyncErrors           metric.Vec[metric.Counter]
+	KVStoreInitialSyncCompleted metric.Vec[metric.Gauge]
+}
+
+func MetricsProvider() *Metrics {
+	return &Metrics{
+		KVStoreSyncQueueSize: metric.NewGaugeVec(metric.GaugeOpts{
+			Namespace: metrics.Namespace,
+			Subsystem: metrics.SubsystemKVStore,
+			Name:      "sync_queue_size",
+			Help:      "Number of elements queued for synchronization in the kvstore",
+		}, []string{metrics.LabelScope, metrics.LabelSourceCluster}),
+		KVStoreSyncErrors: metric.NewCounterVec(metric.CounterOpts{
+			Namespace: metrics.Namespace,
+			Subsystem: metrics.SubsystemKVStore,
+			Name:      "sync_errors_total",
+			Help:      "Number of times synchronization to the kvstore failed",
+		}, []string{metrics.LabelScope, metrics.LabelSourceCluster}),
+		KVStoreInitialSyncCompleted: metric.NewGaugeVec(metric.GaugeOpts{
+			Namespace: metrics.Namespace,
+			Subsystem: metrics.SubsystemKVStore,
+			Name:      "initial_sync_completed",
+			Help:      "Whether the initial synchronization from/to the kvstore has completed",
+		}, []string{metrics.LabelScope, metrics.LabelSourceCluster, metrics.LabelAction}),
+	}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/store.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/store.go
new file mode 100644
index 0000000000..73a05859b7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/store.go
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package store
+
+import (
+	"context"
+	"fmt"
+	"path"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+
+	"github.com/cilium/cilium/pkg/controller"
+	"github.com/cilium/cilium/pkg/kvstore"
+	"github.com/cilium/cilium/pkg/lock"
+	"github.com/cilium/cilium/pkg/logging"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	"github.com/cilium/cilium/pkg/option"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+const (
+	// listTimeoutDefault is the default timeout to wait while performing
+	// the initial list operation of objects from the kvstore
+	listTimeoutDefault = 3 * time.Minute
+
+	// watcherChanSize is the size of the channel to buffer kvstore events
+	watcherChanSize = 100
+)
+
+var (
+	controllers controller.Manager
+
+	log = logging.DefaultLogger.WithField(logfields.LogSubsys, "shared-store")
+
+	kvstoreSyncControllerGroup = controller.NewGroup("kvstore-sync")
+)
+
+// KeyCreator is the function to create a new empty Key instance. Store
+// collaborators must implement this interface and provide the implementation
+// in the Configuration structure.
+type KeyCreator func() Key
+
+// Configuration is the set of configuration parameters of a shared store.
+type Configuration struct {
+	// Prefix is the key prefix of the store shared by all keys. The prefix
+	// is the unique identification of the store. Multiple collaborators
+	// connected to the same kvstore cluster configuring stores with
+	// matching prefixes will automatically form a shared store. This
+	// parameter is required.
+	Prefix string
+
+	// SynchronizationInterval is the interval in which locally owned keys
+	// are synchronized with the kvstore. This parameter is optional.
+	SynchronizationInterval time.Duration
+
+	// SharedKeyDeleteDelay is the delay before a shared key delete is
+	// handled. This parameter is optional, and defaults to 0 if unset.
+	SharedKeyDeleteDelay time.Duration
+
+	// KeyCreator is called to allocate a Key instance when a new shared
+	// key is discovered. This parameter is required.
+	KeyCreator KeyCreator
+
+	// Backend is the kvstore to use as a backend. If no backend is
+	// specified, kvstore.Client() is being used.
+	Backend kvstore.BackendOperations
+
+	// Observer is the observer that will receive events on key mutations
+	Observer Observer
+
+	Context context.Context
+}
+
+// validate is invoked by JoinSharedStore to validate and complete the
+// configuration. It returns nil when the configuration is valid.
+func (c *Configuration) validate() error {
+	if c.Prefix == "" {
+		return fmt.Errorf("prefix must be specified")
+	}
+
+	if c.KeyCreator == nil {
+		return fmt.Errorf("KeyCreator must be specified")
+	}
+
+	if c.SynchronizationInterval == 0 {
+		c.SynchronizationInterval = option.Config.KVstorePeriodicSync
+	}
+
+	if c.Backend == nil {
+		c.Backend = kvstore.Client()
+	}
+
+	if c.Context == nil {
+		c.Context = context.Background()
+	}
+
+	return nil
+}
+
+// SharedStore is an instance of a shared store. It is created with
+// JoinSharedStore() and released with the SharedStore.Close() function.
+type SharedStore struct {
+	// conf is a copy of the store configuration. This field is never
+	// mutated after JoinSharedStore() so it is safe to access this without
+	// a lock.
+	conf Configuration
+
+	// name is the name of the shared store. It is derived from the kvstore
+	// prefix.
+	name string
+
+	// controllerName is the name of the controller used to synchronize
+	// with the kvstore. It is derived from the name.
+	controllerName string
+
+	// backend is the backend as configured via Configuration
+	backend kvstore.BackendOperations
+
+	// mutex protects mutations to localKeys and sharedKeys
+	mutex lock.RWMutex
+
+	// localKeys is a map of keys that are owned by the local instance. All
+	// local keys are synchronized with the kvstore. This map can be
+	// modified with UpdateLocalKey() and DeleteLocalKey().
+	localKeys map[string]LocalKey
+
+	// sharedKeys is a map of all keys that either have been discovered
+	// from remote collaborators or successfully shared local keys. This
+	// map represents the state in the kvstore and is updated based on
+	// kvstore events.
+	sharedKeys map[string]Key
+
+	kvstoreWatcher *kvstore.Watcher
+}
+
+// Observer receives events when objects in the store mutate
+type Observer interface {
+	// OnDelete is called when the key has been deleted from the shared store
+	OnDelete(k NamedKey)
+
+	// OnUpdate is called whenever a change has occurred in the data
+	// structure represented by the key
+	OnUpdate(k Key)
+}
+
+// NamedKey is an interface that a data structure must implement in order to
+// be deleted from a SharedStore.
+type NamedKey interface {
+	// GetKeyName must return the name of the key. The name of the key must
+	// be unique within the store and stable for a particular key. The name
+	// of the key must be identical across agent restarts as the keys
+	// remain in the kvstore.
+	GetKeyName() string
+}
+
+// Key is the interface that a data structure must implement in order to be
+// stored and shared as a key in a SharedStore.
+type Key interface {
+	NamedKey
+
+	// Marshal is called to retrieve the byte slice representation of the
+	// data represented by the key to store it in the kvstore. The function
+	// must ensure that the underlying datatype is properly locked. It is
+	// typically a good idea to use json.Marshal to implement this
+	// function.
+	Marshal() ([]byte, error)
+
+	// Unmarshal is called when an update from the kvstore is received. The
+	// prefix configured for the store is removed from the key, and the
+	// byte slice passed to the function is coming from the Marshal
+	// function from another collaborator. The function must unmarshal and
+	// update the underlying data type. It is typically a good idea to use
+	// json.Unmarshal to implement this function.
+	Unmarshal(key string, data []byte) error
+}
+
+// LocalKey is a Key owned by the local store instance
+type LocalKey interface {
+	Key
+
+	// DeepKeyCopy must return a deep copy of the key
+	DeepKeyCopy() LocalKey
+}
+
+// KVPair represents a basic implementation of the LocalKey interface
+type KVPair struct{ Key, Value string }
+
+func NewKVPair(key, value string) *KVPair { return &KVPair{Key: key, Value: value} }
+func KVPairCreator() Key                  { return &KVPair{} }
+
+func (kv *KVPair) GetKeyName() string       { return kv.Key }
+func (kv *KVPair) Marshal() ([]byte, error) { return []byte(kv.Value), nil }
+
+func (kv *KVPair) Unmarshal(key string, data []byte) error {
+	kv.Key, kv.Value = key, string(data)
+	return nil
+}
+
+func (kv *KVPair) DeepKeyCopy() LocalKey {
+	return NewKVPair(kv.Key, kv.Value)
+}
+
+// JoinSharedStore creates a new shared store based on the provided
+// configuration. An error is returned if the configuration is invalid. The
+// store is initialized with the contents of the kvstore. An error is returned
+// if the contents cannot be retrieved synchronously from the kvstore. It also
+// starts a controller to continuously synchronize the store with the kvstore.
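+//
+// A sketch with an observer receiving remote mutations (illustrative):
+//
+//	type obs struct{}
+//
+//	func (obs) OnUpdate(k Key)      { /* a key was created or modified */ }
+//	func (obs) OnDelete(k NamedKey) { /* a key was deleted */ }
+//
+//	s, _ := JoinSharedStore(Configuration{
+//		Prefix:     "cilium/state/example/v1",
+//		KeyCreator: KVPairCreator,
+//		Observer:   obs{},
+//	})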
+func JoinSharedStore(c Configuration) (*SharedStore, error) {
+	if err := c.validate(); err != nil {
+		return nil, err
+	}
+
+	s := &SharedStore{
+		conf:       c,
+		localKeys:  map[string]LocalKey{},
+		sharedKeys: map[string]Key{},
+		backend:    c.Backend,
+	}
+
+	s.name = "store-" + s.conf.Prefix
+	s.controllerName = "kvstore-sync-" + s.name
+
+	if err := s.listAndStartWatcher(); err != nil {
+		return nil, err
+	}
+
+	controllers.UpdateController(s.controllerName,
+		controller.ControllerParams{
+			Group: kvstoreSyncControllerGroup,
+			DoFunc: func(ctx context.Context) error {
+				return s.syncLocalKeys(ctx, true)
+			},
+			RunInterval: s.conf.SynchronizationInterval,
+		},
+	)
+
+	return s, nil
+}
+
+func (s *SharedStore) onDelete(k NamedKey) {
+	if s.conf.Observer != nil {
+		s.conf.Observer.OnDelete(k)
+	}
+}
+
+func (s *SharedStore) onUpdate(k Key) {
+	if s.conf.Observer != nil {
+		s.conf.Observer.OnUpdate(k)
+	}
+}
+
+// Release frees all resources owned by the store, but leaves all keys in the
+// kvstore intact
+func (s *SharedStore) Release() {
+	// Wait for all write operations to complete and then block all further
+	// operations
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+
+	if s.kvstoreWatcher != nil {
+		s.kvstoreWatcher.Stop()
+	}
+
+	controllers.RemoveController(s.controllerName)
+}
+
+// Close stops participation with a shared store and removes all keys owned by
+// this node in the kvstore. This stops the controller started by
+// JoinSharedStore().
+func (s *SharedStore) Close(ctx context.Context) {
+	s.Release()
+
+	for name, key := range s.localKeys {
+		if err := s.backend.Delete(ctx, s.keyPath(key)); err != nil {
+			s.getLogger().WithError(err).Warning("Unable to delete key in kvstore")
+		}
+
+		delete(s.localKeys, name)
+		// Since we have received our own notification we also need to remove
+		// it from the shared keys.
+		delete(s.sharedKeys, name)
+
+		s.onDelete(key)
+	}
+}
+
+// keyPath returns the absolute kvstore path of a key
+func (s *SharedStore) keyPath(key NamedKey) string {
+	// WARNING - STABLE API: The composition of the absolute key path
+	// cannot be changed without breaking upgrades and downgrades.
+	return path.Join(s.conf.Prefix, key.GetKeyName())
+}
+
+// syncLocalKey synchronizes a key to the kvstore
+func (s *SharedStore) syncLocalKey(ctx context.Context, key LocalKey, lease bool) error {
+	jsonValue, err := key.Marshal()
+	if err != nil {
+		return err
+	}
+
+	// Update the key in the kvstore, overwriting any existing key. If
+	// requested, attach a lease to expire the entry when the agent dies
+	// and never comes back up.
+	if _, err := s.backend.UpdateIfDifferent(ctx, s.keyPath(key), jsonValue, lease); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// syncLocalKeys synchronizes all local keys with the kvstore
+func (s *SharedStore) syncLocalKeys(ctx context.Context, lease bool) error {
+	// Create a copy of all local keys so we can unlock and sync to kvstore
+	// without holding the lock
+	s.mutex.RLock()
+	keys := make([]LocalKey, 0, len(s.localKeys))
+	for _, key := range s.localKeys {
+		keys = append(keys, key)
+	}
+	s.mutex.RUnlock()
+
+	for _, key := range keys {
+		if err := s.syncLocalKey(ctx, key, lease); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *SharedStore) lookupLocalKey(name string) LocalKey {
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+
+	for _, key := range s.localKeys {
+		if key.GetKeyName() == name {
+			return key
+		}
+	}
+
+	return nil
+}
+
+// NumEntries returns the number of entries in the store
+func (s *SharedStore) NumEntries() int {
+	if s == nil {
+		return 0
+	}
+
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+	return len(s.sharedKeys)
+}
+
+// SharedKeysMap returns a copy of the SharedKeysMap. The returned map can
+// be safely modified, but the values of the map represent the actual data
+// stored in the internal SharedStore sharedKeys map.
+func (s *SharedStore) SharedKeysMap() map[string]Key {
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+	sharedKeysCopy := make(map[string]Key, len(s.sharedKeys))
+
+	for k, v := range s.sharedKeys {
+		sharedKeysCopy[k] = v
+	}
+	return sharedKeysCopy
+}
+
+// UpdateLocalKeySync synchronously synchronizes a local key with the kvstore
+// and adds it to the list of local keys to be synchronized if the initial
+// synchronous synchronization was successful
+func (s *SharedStore) UpdateLocalKeySync(ctx context.Context, key LocalKey) error {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	err := s.syncLocalKey(ctx, key, true)
+	if err == nil {
+		s.localKeys[key.GetKeyName()] = key.DeepKeyCopy()
+	}
+	return err
+}
+
+// UpdateKeySync synchronously synchronizes a key with the kvstore.
+func (s *SharedStore) UpdateKeySync(ctx context.Context, key LocalKey, lease bool) error {
+	return s.syncLocalKey(ctx, key, lease)
+}
+
+// DeleteLocalKey removes a key from being synchronized with the kvstore
+func (s *SharedStore) DeleteLocalKey(ctx context.Context, key NamedKey) {
+	name := key.GetKeyName()
+
+	s.mutex.Lock()
+	_, ok := s.localKeys[name]
+	delete(s.localKeys, name)
+	s.mutex.Unlock()
+
+	err := s.backend.Delete(ctx, s.keyPath(key))
+
+	if ok {
+		if err != nil {
+			s.getLogger().WithError(err).Warning("Unable to delete key in kvstore")
+		}
+
+		s.onDelete(key)
+	}
+}
+
+func (s *SharedStore) getLogger() *logrus.Entry {
+	return log.WithFields(logrus.Fields{
+		"storeName": s.name,
+	})
+}
+
+func (s *SharedStore) updateKey(name string, value []byte) error {
+	newKey := s.conf.KeyCreator()
+	if err := newKey.Unmarshal(name, value); err != nil {
+		return err
+	}
+
+	s.mutex.Lock()
+	s.sharedKeys[name] = newKey
+	s.mutex.Unlock()
+
+	s.onUpdate(newKey)
+	return nil
+}
+
+func (s *SharedStore) deleteSharedKey(name string) {
+	s.mutex.Lock()
+	existingKey, ok := s.sharedKeys[name]
+	delete(s.sharedKeys, name)
+	s.mutex.Unlock()
+
+	if ok {
+		go func() {
+			time.Sleep(s.conf.SharedKeyDeleteDelay)
+			s.mutex.RLock()
+			_, ok := s.sharedKeys[name]
+			s.mutex.RUnlock()
+			if ok {
+				s.getLogger().WithFields(logrus.Fields{"key": name, "timeWindow": s.conf.SharedKeyDeleteDelay}).
+					Warning("Received delete event for key which re-appeared within delay time window")
+				return
+			}
+
+			s.onDelete(existingKey)
+		}()
+	} else {
+		s.getLogger().WithField("key", name).
+			Warning("Unable to find deleted key in local state")
+	}
+}
+
+func (s *SharedStore) listAndStartWatcher() error {
+	listDone := make(chan struct{})
+
+	go s.watcher(listDone)
+
+	select {
+	case <-listDone:
+	case <-time.After(listTimeoutDefault):
+		return fmt.Errorf("timeout while retrieving initial list of objects from kvstore")
+	}
+
+	return nil
+}
+
+func (s *SharedStore) watcher(listDone chan struct{}) {
+	s.kvstoreWatcher = s.backend.ListAndWatch(s.conf.Context, s.conf.Prefix, watcherChanSize)
+
+	for event := range s.kvstoreWatcher.Events {
+		if event.Typ == kvstore.EventTypeListDone {
+			s.getLogger().Debug("Initial list of objects received from kvstore")
+			close(listDone)
+			continue
+		}
+
+		logger := s.getLogger().WithFields(logrus.Fields{
+			"key":       event.Key,
+			"eventType": event.Typ,
+		})
+
+		logger.Debugf("Received key update via kvstore [value %s]", string(event.Value))
+
+		keyName := strings.TrimPrefix(event.Key, s.conf.Prefix)
+		if keyName[0] == '/' {
+			keyName = keyName[1:]
+		}
+
+		switch event.Typ {
+		case kvstore.EventTypeCreate, kvstore.EventTypeModify:
+			if err := s.updateKey(keyName, event.Value); err != nil {
+				logger.WithError(err).Warningf("Unable to unmarshal store value: %s", string(event.Value))
+			}
+
+		case kvstore.EventTypeDelete:
+			if localKey := s.lookupLocalKey(keyName); localKey != nil {
+				logger.Warning("Received delete event for local key. Re-creating the key in the kvstore")
+
+				s.syncLocalKey(s.conf.Context, localKey, true)
+			} else {
+				s.deleteSharedKey(keyName)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/syncstore.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/syncstore.go
new file mode 100644
index 0000000000..f8af6aa999
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/syncstore.go
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package store
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"path"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/sirupsen/logrus"
+	"k8s.io/client-go/util/workqueue"
+
+	"github.com/cilium/cilium/pkg/kvstore"
+	"github.com/cilium/cilium/pkg/lock"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	"github.com/cilium/cilium/pkg/metrics"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+// SyncStore abstracts the operations used to synchronize key/value pairs
+// into a kvstore.
+type SyncStore interface {
+	// Run starts the SyncStore logic, blocking until the context is closed.
+	Run(ctx context.Context)
+
+	// UpsertKey upserts a key/value pair into the kvstore.
+	UpsertKey(ctx context.Context, key Key) error
+
+	// DeleteKey removes a key from the kvstore.
+	DeleteKey(ctx context.Context, key NamedKey) error
+
+	// Synced triggers the insertion of the "synced" key associated with this
+	// store into the kvstore once all upsertions already issued have completed
+	// successfully, eventually executing all specified callbacks (if any).
+	// Only the first invocation takes effect.
+	Synced(ctx context.Context, callbacks ...func(ctx context.Context)) error
+}
+
+// SyncStoreBackend represents the subset of kvstore.BackendOperations leveraged
+// by SyncStore implementations.
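+//
+// A sketch of wiring a sync store to a backend (illustrative, assuming a
+// Factory f and a kvstore backend obtained elsewhere):
+//
+//	ss := f.NewSyncStore("cluster1", backend, "cilium/state/nodes/v1")
+//	go ss.Run(ctx)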
+type SyncStoreBackend interface {
+	// Update creates or updates a key.
+	Update(ctx context.Context, key string, value []byte, lease bool) error
+	// Delete deletes a key.
+	Delete(ctx context.Context, key string) error
+
+	// RegisterLeaseExpiredObserver registers a function which is executed when
+	// the lease associated with a key having the given prefix is detected as expired.
+	RegisterLeaseExpiredObserver(prefix string, fn func(key string))
+}
+
+// wqSyncStore implements the SyncStore interface leveraging a workqueue to
+// coalesce update/delete requests and handle retries in case of errors.
+type wqSyncStore struct {
+	backend SyncStoreBackend
+	prefix  string
+	source  string
+
+	workers   uint
+	withLease bool
+
+	limiter   workqueue.RateLimiter
+	workqueue workqueue.RateLimitingInterface
+	state     lock.Map[string, []byte] // map[NamedKey.GetKeyName()]Key.Marshal()
+
+	synced          atomic.Bool                // Synced() has been triggered
+	pendingSync     lock.Map[string, struct{}] // the set of keys still to sync
+	syncedKey       string
+	syncedCallbacks []func(context.Context)
+
+	log          *logrus.Entry
+	queuedMetric prometheus.Gauge
+	errorsMetric prometheus.Counter
+	syncedMetric prometheus.Gauge
+}
+
+type syncCanary struct{ skipCallbacks bool }
+
+type WSSOpt func(*wqSyncStore)
+
+// WSSWithRateLimiter sets the rate limiting algorithm to be used when requeueing failed events.
+func WSSWithRateLimiter(limiter workqueue.RateLimiter) WSSOpt {
+	return func(wss *wqSyncStore) {
+		wss.limiter = limiter
+	}
+}
+
+// WSSWithWorkers configures the number of workers spawned by Run() to handle update/delete operations.
+func WSSWithWorkers(workers uint) WSSOpt {
+	return func(wss *wqSyncStore) {
+		wss.workers = workers
+	}
+}
+
+// WSSWithoutLease disables attaching the lease to upserted keys.
+func WSSWithoutLease() WSSOpt {
+	return func(wss *wqSyncStore) {
+		wss.withLease = false
+	}
+}
+
+// WSSWithSyncedKeyOverride overrides the "synced" key inserted into the kvstore
+// once the initial synchronization has completed (by default it corresponds to the prefix).
+func WSSWithSyncedKeyOverride(key string) WSSOpt {
+	return func(wss *wqSyncStore) {
+		wss.syncedKey = key
+	}
+}
+
+// newWorkqueueSyncStore returns a SyncStore instance which leverages a workqueue
+// to coalesce update/delete requests and handle retries in case of errors.
+func newWorkqueueSyncStore(clusterName string, backend SyncStoreBackend, prefix string, m *Metrics, opts ...WSSOpt) SyncStore {
+	wss := &wqSyncStore{
+		backend: backend,
+		prefix:  prefix,
+		source:  clusterName,
+
+		workers:   1,
+		withLease: true,
+		limiter:   workqueue.DefaultControllerRateLimiter(),
+		syncedKey: prefix,
+
+		log: log.WithField(logfields.Prefix, prefix),
+	}
+
+	for _, opt := range opts {
+		opt(wss)
+	}
+
+	wss.log = wss.log.WithField(logfields.ClusterName, wss.source)
+	wss.workqueue = workqueue.NewRateLimitingQueue(wss.limiter)
+	wss.queuedMetric = m.KVStoreSyncQueueSize.WithLabelValues(kvstore.GetScopeFromKey(prefix), wss.source)
+	wss.errorsMetric = m.KVStoreSyncErrors.WithLabelValues(kvstore.GetScopeFromKey(prefix), wss.source)
+	wss.syncedMetric = m.KVStoreInitialSyncCompleted.WithLabelValues(kvstore.GetScopeFromKey(prefix), wss.source, "write")
+	return wss
+}
+
+// Run starts the SyncStore logic, blocking until the context is closed.
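+//
+// Illustrative usage sketch (editor's addition, not upstream documentation;
+// the prefix and the backend/metrics wiring are assumptions made only for
+// the example):
+//
+//	store := newWorkqueueSyncStore("cluster1", backend, "cilium/state/nodes/v1", m)
+//	go store.Run(ctx)
+//	_ = store.UpsertKey(ctx, key) // queued and retried on kvstore failures
+//	_ = store.UpsertKey(ctx, key) // coalesced: skipped if the value is unchanged
+//	_ = store.Synced(ctx)         // upserts the "synced" canary once the queue drains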
+func (wss *wqSyncStore) Run(ctx context.Context) { + var wg sync.WaitGroup + + wss.syncedMetric.Set(metrics.BoolToFloat64(false)) + defer wss.syncedMetric.Set(metrics.BoolToFloat64(false)) + + wss.backend.RegisterLeaseExpiredObserver(wss.prefix, wss.handleExpiredLease) + wss.backend.RegisterLeaseExpiredObserver(wss.getSyncedKey(), wss.handleExpiredLease) + + wss.log.WithField(logfields.Workers, wss.workers).Info("Starting workqueue-based sync store") + wg.Add(int(wss.workers)) + for i := uint(0); i < wss.workers; i++ { + go func() { + defer wg.Done() + for wss.processNextItem(ctx) { + } + }() + } + + <-ctx.Done() + + wss.backend.RegisterLeaseExpiredObserver(wss.prefix, nil) + wss.backend.RegisterLeaseExpiredObserver(wss.getSyncedKey(), nil) + + wss.log.Info("Shutting down workqueue-based sync store") + wss.workqueue.ShutDown() + wg.Wait() +} + +// UpsertKey registers the key for asynchronous upsertion in the kvstore, if the +// corresponding value has changed. It returns an error in case it is impossible +// to marshal the value, while kvstore failures are automatically handled through +// a retry mechanism. +func (wss *wqSyncStore) UpsertKey(_ context.Context, k Key) error { + key := k.GetKeyName() + value, err := k.Marshal() + if err != nil { + return fmt.Errorf("failed marshaling key %q: %w", k, err) + } + + prevValue, loaded := wss.state.Swap(key, value) + if loaded && bytes.Equal(prevValue, value) { + wss.log.WithField(logfields.Key, k).Debug("ignoring upsert request for already up-to-date key") + } else { + if !wss.synced.Load() { + wss.pendingSync.Store(key, struct{}{}) + } + + wss.workqueue.Add(key) + wss.queuedMetric.Set(float64(wss.workqueue.Len())) + } + + return nil +} + +// DeleteKey registers the key for asynchronous deletion from the kvstore, if it +// was known to be present. It never returns an error, because kvstore failures +// are automatically handled through a retry mechanism. +func (wss *wqSyncStore) DeleteKey(_ context.Context, k NamedKey) error { + key := k.GetKeyName() + if _, loaded := wss.state.LoadAndDelete(key); loaded { + wss.workqueue.Add(key) + wss.queuedMetric.Set(float64(wss.workqueue.Len())) + } else { + wss.log.WithField(logfields.Key, key).Debug("ignoring delete request for non-existing key") + } + + return nil +} + +func (wss *wqSyncStore) Synced(_ context.Context, callbacks ...func(ctx context.Context)) error { + if synced := wss.synced.Swap(true); !synced { + wss.syncedCallbacks = callbacks + wss.workqueue.Add(syncCanary{}) + } + return nil +} + +func (wss *wqSyncStore) processNextItem(ctx context.Context) bool { + // Retrieve the next key to process from the workqueue. + key, shutdown := wss.workqueue.Get() + wss.queuedMetric.Set(float64(wss.workqueue.Len())) + if shutdown { + return false + } + + // We call Done here so the workqueue knows we have finished + // processing this item. + defer func() { + wss.workqueue.Done(key) + // This ensures that the metric is correctly updated in case of requeues. + wss.queuedMetric.Set(float64(wss.workqueue.Len())) + }() + + // Run the handler, passing it the key to be processed as parameter. + if err := wss.handle(ctx, key); err != nil { + // Put the item back on the workqueue to handle any transient errors. + wss.errorsMetric.Inc() + wss.workqueue.AddRateLimited(key) + return true + } + + // Since no error occurred, forget this item so it does not get queued again + // until another change happens. 
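+	// (Editor's note: Forget only clears the rate limiter's failure history
+	// for this key, resetting its backoff; the item itself was already marked
+	// done via the deferred Done call above.)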
+ wss.workqueue.Forget(key) + if skey, ok := key.(string); ok { + wss.pendingSync.Delete(skey) + } + return true +} + +func (wss *wqSyncStore) handle(ctx context.Context, key interface{}) error { + if value, ok := key.(syncCanary); ok { + return wss.handleSync(ctx, value.skipCallbacks) + } + + if value, ok := wss.state.Load(key.(string)); ok { + return wss.handleUpsert(ctx, key.(string), value) + } + + return wss.handleDelete(ctx, key.(string)) +} + +func (wss *wqSyncStore) handleUpsert(ctx context.Context, key string, value []byte) error { + scopedLog := wss.log.WithField(logfields.Key, key) + + err := wss.backend.Update(ctx, wss.keyPath(key), value, wss.withLease) + if err != nil { + scopedLog.WithError(err).Warning("Failed upserting key in kvstore. Retrying...") + return err + } + + scopedLog.Debug("Upserted key in kvstore") + return nil +} + +func (wss *wqSyncStore) handleDelete(ctx context.Context, key string) error { + scopedLog := wss.log.WithField(logfields.Key, key) + + if err := wss.backend.Delete(ctx, wss.keyPath(key)); err != nil { + scopedLog.WithError(err).Warning("Failed deleting key from kvstore. Retrying...") + return err + } + + scopedLog.Debug("Deleted key from kvstore") + return nil +} + +func (wss *wqSyncStore) handleSync(ctx context.Context, skipCallbacks bool) error { + // This could be replaced by wss.toSync.Len() == 0 if it only existed... + syncCompleted := true + wss.pendingSync.Range(func(string, struct{}) bool { + syncCompleted = false + return false + }) + + if !syncCompleted { + return fmt.Errorf("there are still keys to be synchronized") + } + + key := wss.getSyncedKey() + scopedLog := wss.log.WithField(logfields.Key, key) + + err := wss.backend.Update(ctx, key, []byte(time.Now().Format(time.RFC3339)), wss.withLease) + if err != nil { + scopedLog.WithError(err).Warning("Failed upserting synced key in kvstore. Retrying...") + return err + } + + wss.log.Info("Initial synchronization from the external source completed") + wss.syncedMetric.Set(metrics.BoolToFloat64(true)) + + // Execute any callback that might have been registered. + if !skipCallbacks { + for _, callback := range wss.syncedCallbacks { + callback(ctx) + } + } + + return nil +} + +// handleExpiredLease gets executed when the lease attached to a given key expired, +// and is responsible for enqueuing the given key to recreate it. +func (wss *wqSyncStore) handleExpiredLease(key string) { + defer wss.queuedMetric.Set(float64(wss.workqueue.Len())) + + if key == wss.getSyncedKey() { + // Re-enqueue the creation of the sync canary, but make sure that + // the registered callbacks are not executed a second time. + wss.workqueue.Add(syncCanary{skipCallbacks: true}) + return + } + + key = strings.TrimPrefix(strings.TrimPrefix(key, wss.prefix), "/") + _, ok := wss.state.Load(key) + if ok { + wss.log.WithField(logfields.Key, key).Debug("enqueuing upsert request for key as the attached lease expired") + if !wss.synced.Load() { + wss.pendingSync.Store(key, struct{}{}) + } + + wss.workqueue.Add(key) + } +} + +// keyPath returns the absolute kvstore path of a key +func (wss *wqSyncStore) keyPath(key string) string { + // WARNING - STABLE API: The composition of the absolute key path + // cannot be changed without breaking up and downgrades. 
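+	// For example (hypothetical values): prefix "cilium/state/nodes/v1" and
+	// key "cluster1/node1" compose into "cilium/state/nodes/v1/cluster1/node1".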
+ return path.Join(wss.prefix, key) +} + +func (wss *wqSyncStore) getSyncedKey() string { + return path.Join(kvstore.SyncedPrefix, wss.source, wss.syncedKey) +} diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstore.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstore.go new file mode 100644 index 0000000000..04881f7179 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstore.go @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package store + +import ( + "context" + "strings" + "sync/atomic" + + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/kvstore" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/metrics" + "github.com/cilium/cilium/pkg/metrics/metric" +) + +// WatchStore abstracts the operations allowing to synchronize key/value pairs +// from a kvstore, emitting the corresponding events. +type WatchStore interface { + // Watch starts watching the specified kvstore prefix, blocking until the context is closed. + // Depending on the implementation, it might be executed multiple times. + Watch(ctx context.Context, backend WatchStoreBackend, prefix string) + + // NumEntries returns the number of entries synchronized from the store. + NumEntries() uint64 + + // Synced returns whether the initial list of entries has been retrieved from + // the kvstore, and new events are currently being watched. + Synced() bool + + // Drain emits a deletion event for each known key. It shall be called only + // when no watch operation is in progress. + Drain() +} + +// WatchStoreBackend represents the subset of kvstore.BackendOperations leveraged +// by WatchStore implementations. +type WatchStoreBackend interface { + // ListAndWatch creates a new watcher for the given prefix after listing the existing keys. + ListAndWatch(ctx context.Context, prefix string, chanSize int) *kvstore.Watcher +} + +type RWSOpt func(*restartableWatchStore) + +// WSWithOnSyncCallback registers a function to be executed after +// listing all keys from the kvstore for the first time. Multiple +// callback functions can be registered. +func RWSWithOnSyncCallback(callback func(ctx context.Context)) RWSOpt { + return func(rws *restartableWatchStore) { + rws.onSyncCallbacks = append(rws.onSyncCallbacks, callback) + } +} + +// WSWithEntriesGauge registers a Prometheus gauge metric that is kept +// in sync with the number of entries synchronized from the kvstore. +func RWSWithEntriesMetric(gauge prometheus.Gauge) RWSOpt { + return func(rws *restartableWatchStore) { + rws.entriesMetric = gauge + } +} + +type rwsEntry struct { + key Key + stale bool +} + +// restartableWatchStore implements the WatchStore interface, supporting +// multiple executions of the Watch() operation (granted that the previous one +// already terminated). This allows to transparently handle the case in which +// we had to create a new etcd connection (for instance following a failure) +// which refers to the same remote cluster. 
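+//
+// Editor's sketch of the intended lifecycle (identifiers are assumptions used
+// only for illustration):
+//
+//	rws := newRestartableWatchStore("cluster1", keyCreator, observer, m)
+//	rws.Watch(ctx1, backend1, "cilium/state/nodes/v1") // returns once ctx1 is closed
+//	rws.Watch(ctx2, backend2, "cilium/state/nodes/v1") // restart after reconnecting;
+//	// keys no longer listed by the new watch are drained as synthetic deletions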
+type restartableWatchStore struct { + source string + keyCreator KeyCreator + observer Observer + + watching atomic.Bool + synced atomic.Bool + onSyncCallbacks []func(ctx context.Context) + + // Using a separate entries counter avoids the need for synchronizing the + // access to the state map, since the only concurrent reader is represented + // by the NumEntries() function. + state map[string]*rwsEntry + numEntries atomic.Uint64 + + log *logrus.Entry + entriesMetric prometheus.Gauge + syncMetric metric.Vec[metric.Gauge] +} + +// NewRestartableWatchStore returns a WatchStore instance which supports +// restarting the watch operation multiple times, automatically handling +// the emission of deletion events for all stale entries (if enabled). It +// shall be restarted only once the previous Watch execution terminated. +func newRestartableWatchStore(clusterName string, keyCreator KeyCreator, observer Observer, m *Metrics, opts ...RWSOpt) WatchStore { + rws := &restartableWatchStore{ + source: clusterName, + keyCreator: keyCreator, + observer: observer, + + state: make(map[string]*rwsEntry), + + log: log, + entriesMetric: metrics.NoOpGauge, + syncMetric: m.KVStoreInitialSyncCompleted, + } + + for _, opt := range opts { + opt(rws) + } + + rws.log = rws.log.WithField(logfields.ClusterName, rws.source) + return rws +} + +// Watch starts watching the specified kvstore prefix, blocking until the context is closed. +// It might be executed multiple times, granted that the previous execution already terminated. +func (rws *restartableWatchStore) Watch(ctx context.Context, backend WatchStoreBackend, prefix string) { + // Append a trailing "/" to the prefix, to make sure that we watch only + // sub-elements belonging to that prefix, and not to sibling prefixes + // (for instance in case the last part of the prefix is the cluster name, + // and one is the substring of another). + if !strings.HasSuffix(prefix, "/") { + prefix = prefix + "/" + } + + rws.log = rws.log.WithField(logfields.Prefix, prefix) + syncedMetric := rws.syncMetric.WithLabelValues( + kvstore.GetScopeFromKey(prefix), rws.source, "read") + + rws.log.Info("Starting restartable watch store") + syncedMetric.Set(metrics.BoolToFloat64(false)) + + if rws.watching.Swap(true) { + rws.log.Panic("Cannot start the watch store while still running") + } + + defer func() { + rws.log.Info("Stopped restartable watch store") + syncedMetric.Set(metrics.BoolToFloat64(false)) + rws.watching.Store(false) + rws.synced.Store(false) + }() + + // Mark all known keys as stale. + for _, entry := range rws.state { + entry.stale = true + } + + // The events channel is closed when the context is closed. + watcher := backend.ListAndWatch(ctx, prefix, 0) + for event := range watcher.Events { + if event.Typ == kvstore.EventTypeListDone { + rws.log.Debug("Initial synchronization completed") + rws.drainKeys(true) + syncedMetric.Set(metrics.BoolToFloat64(true)) + rws.synced.Store(true) + + for _, callback := range rws.onSyncCallbacks { + callback(ctx) + } + + // Clear the list of callbacks so that they don't get executed + // a second time in case of reconnections. 
+			rws.onSyncCallbacks = nil
+
+			continue
+		}
+
+		key := strings.TrimPrefix(event.Key, prefix)
+		rws.log.WithFields(logrus.Fields{
+			logfields.Key:   key,
+			logfields.Event: event.Typ,
+		}).Debug("Received event from kvstore")
+
+		switch event.Typ {
+		case kvstore.EventTypeCreate, kvstore.EventTypeModify:
+			rws.handleUpsert(key, event.Value)
+		case kvstore.EventTypeDelete:
+			rws.handleDelete(key)
+		}
+	}
+}
+
+// NumEntries returns the number of entries synchronized from the store.
+func (rws *restartableWatchStore) NumEntries() uint64 {
+	return rws.numEntries.Load()
+}
+
+// Synced returns whether the initial list of entries has been retrieved from
+// the kvstore, and new events are currently being watched.
+func (rws *restartableWatchStore) Synced() bool {
+	return rws.synced.Load()
+}
+
+// Drain emits a deletion event for each known key. It shall be called only
+// when no watch operation is in progress.
+func (rws *restartableWatchStore) Drain() {
+	if rws.watching.Swap(true) {
+		rws.log.Panic("Cannot drain the watch store while still running")
+	}
+	defer rws.watching.Store(false)
+
+	rws.log.Info("Draining restartable watch store")
+	rws.drainKeys(false)
+	rws.log.Info("Drained restartable watch store")
+}
+
+// drainKeys emits synthetic deletion events:
+// * staleOnly == true: for all keys marked as stale;
+// * staleOnly == false: for all known keys;
+func (rws *restartableWatchStore) drainKeys(staleOnly bool) {
+	for key, entry := range rws.state {
+		if !staleOnly || entry.stale {
+			rws.log.WithField(logfields.Key, key).Debug("Emitting deletion event for stale key")
+			rws.handleDelete(key)
+		}
+	}
+}
+
+func (rws *restartableWatchStore) handleUpsert(key string, value []byte) {
+	entry := &rwsEntry{key: rws.keyCreator()}
+	if err := entry.key.Unmarshal(key, value); err != nil {
+		rws.log.WithFields(logrus.Fields{
+			logfields.Key:   key,
+			logfields.Value: string(value),
+		}).WithError(err).Warning("Unable to unmarshal value")
+		return
+	}
+
+	rws.state[key] = entry
+	rws.numEntries.Store(uint64(len(rws.state)))
+	rws.entriesMetric.Set(float64(len(rws.state)))
+	rws.observer.OnUpdate(entry.key)
+}
+
+func (rws *restartableWatchStore) handleDelete(key string) {
+	entry, ok := rws.state[key]
+	if !ok {
+		rws.log.WithField(logfields.Key, key).Warning("Received deletion event for unknown key")
+		return
+	}
+
+	delete(rws.state, key)
+	rws.numEntries.Store(uint64(len(rws.state)))
+	rws.entriesMetric.Set(float64(len(rws.state)))
+	rws.observer.OnDelete(entry.key)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstoremgr.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstoremgr.go
new file mode 100644
index 0000000000..44f85114c7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstoremgr.go
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package store
+
+import (
+	"context"
+	"path"
+	"sync"
+	"sync/atomic"
+
+	"github.com/sirupsen/logrus"
+
+	"github.com/cilium/cilium/pkg/kvstore"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+// WSMFunc is a function which can be registered in the WatchStoreManager.
+type WSMFunc func(context.Context)
+
+// WatchStoreManager allows registering a set of functions to be asynchronously
+// executed when the corresponding kvstore prefixes are synchronized (based on
+// the implementation).
+type WatchStoreManager interface {
+	// Register registers a function associated with a given kvstore prefix.
+ // It cannot be called once Run() has started. + Register(prefix string, function WSMFunc) + // Run starts the manager, blocking until the context is closed and all + // started functions terminated. + Run(ctx context.Context) +} + +// wsmCommon implements the common logic shared by WatchStoreManager implementations. +type wsmCommon struct { + wg sync.WaitGroup + functions map[string]WSMFunc + + running atomic.Bool + log *logrus.Entry +} + +func newWSMCommon(clusterName string) wsmCommon { + return wsmCommon{ + functions: make(map[string]WSMFunc), + log: log.WithField(logfields.ClusterName, clusterName), + } +} + +// Register registers a function associated with a given kvstore prefix. +// It cannot be called once Run() has started. +func (mgr *wsmCommon) Register(prefix string, function WSMFunc) { + if mgr.running.Load() { + mgr.log.Panic("Cannot call Register while the watch store manager is running") + } + + mgr.functions[prefix] = function +} + +func (mgr *wsmCommon) ready(ctx context.Context, prefix string) { + if fn := mgr.functions[prefix]; fn != nil { + mgr.log.WithField(logfields.Prefix, prefix).Debug("Starting function for kvstore prefix") + delete(mgr.functions, prefix) + + mgr.wg.Add(1) + go func() { + defer mgr.wg.Done() + fn(ctx) + mgr.log.WithField(logfields.Prefix, prefix).Debug("Function terminated for kvstore prefix") + }() + } else { + mgr.log.WithField(logfields.Prefix, prefix).Debug("Received sync event for unregistered prefix") + } +} + +func (mgr *wsmCommon) run() { + mgr.log.Info("Starting watch store manager") + if mgr.running.Swap(true) { + mgr.log.Panic("Cannot start the watch store manager twice") + } +} + +func (mgr *wsmCommon) wait() { + mgr.wg.Wait() + mgr.log.Info("Stopped watch store manager") +} + +type wsmSync struct { + wsmCommon + + clusterName string + backend WatchStoreBackend + store WatchStore + onUpdate func(prefix string) +} + +// NewWatchStoreManagerSync implements the WatchStoreManager interface, starting the +// registered functions only once the corresponding prefix sync canary has been received. +// This ensures that the synchronization of the keys hosted under the given prefix +// have been successfully synchronized from the external source, even in case an +// ephemeral kvstore is used. +func newWatchStoreManagerSync(backend WatchStoreBackend, clusterName string, factory Factory) WatchStoreManager { + mgr := wsmSync{ + wsmCommon: newWSMCommon(clusterName), + clusterName: clusterName, + backend: backend, + } + + mgr.store = factory.NewWatchStore(clusterName, KVPairCreator, &mgr) + return &mgr +} + +// Run starts the manager, blocking until the context is closed and all +// started functions terminated. +func (mgr *wsmSync) Run(ctx context.Context) { + mgr.run() + mgr.onUpdate = func(prefix string) { mgr.ready(ctx, prefix) } + mgr.store.Watch(ctx, mgr.backend, path.Join(kvstore.SyncedPrefix, mgr.clusterName)) + mgr.wait() +} + +func (mgr *wsmSync) OnUpdate(k Key) { mgr.onUpdate(k.GetKeyName()) } +func (mgr *wsmSync) OnDelete(k NamedKey) {} + +type wsmImmediate struct { + wsmCommon +} + +// NewWatchStoreManagerImmediate implements the WatchStoreManager interface, +// immediately starting the registered functions once Run() is executed. +func NewWatchStoreManagerImmediate(clusterName string) WatchStoreManager { + return &wsmImmediate{ + wsmCommon: newWSMCommon(clusterName), + } +} + +// Run starts the manager, blocking until the context is closed and all +// started functions terminated. 
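+//
+// Illustrative usage (editor's addition; the prefix and callback body are
+// assumptions):
+//
+//	mgr := NewWatchStoreManagerImmediate("cluster1")
+//	mgr.Register("cilium/state/nodes/v1", func(ctx context.Context) {
+//		// watch the prefix until ctx is closed
+//	})
+//	mgr.Run(ctx)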
+func (mgr *wsmImmediate) Run(ctx context.Context) {
+	mgr.run()
+	for prefix := range mgr.functions {
+		mgr.ready(ctx, prefix)
+	}
+	mgr.wait()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/trace.go b/vendor/github.com/cilium/cilium/pkg/kvstore/trace.go
new file mode 100644
index 0000000000..b06887c0f8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/trace.go
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	traceEnabled bool
+)
+
+// EnableTracing enables kvstore tracing
+func EnableTracing() {
+	traceEnabled = true
+}
+
+// Trace is used to trace kvstore debug messages
+func Trace(format string, err error, fields logrus.Fields, a ...interface{}) {
+	if traceEnabled {
+		log.WithError(err).WithFields(fields).Debugf(format, a...)
+	}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/watcher_cache.go b/vendor/github.com/cilium/cilium/pkg/kvstore/watcher_cache.go
new file mode 100644
index 0000000000..a3bf7cbf55
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/watcher_cache.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+type watchState struct {
+	deletionMark bool
+}
+
+type watcherCache map[string]watchState
+
+func (wc watcherCache) Exists(key []byte) bool {
+	if _, ok := wc[string(key)]; ok {
+		return true
+	}
+
+	return false
+}
+
+func (wc watcherCache) RemoveDeleted(f func(string)) {
+	for k, localKey := range wc {
+		if localKey.deletionMark {
+			f(k)
+			delete(wc, k)
+		}
+	}
+}
+
+func (wc watcherCache) MarkAllForDeletion() {
+	for k := range wc {
+		wc[k] = watchState{deletionMark: true}
+	}
+}
+
+func (wc watcherCache) MarkInUse(key []byte) {
+	wc[string(key)] = watchState{deletionMark: false}
+}
+
+func (wc watcherCache) RemoveKey(key []byte) {
+	delete(wc, string(key))
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/maps/bwmap/bwmap.go b/vendor/github.com/cilium/cilium/pkg/maps/bwmap/bwmap.go
new file mode 100644
index 0000000000..c644286175
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/maps/bwmap/bwmap.go
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package bwmap
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/cilium/cilium/pkg/bpf"
+	"github.com/cilium/cilium/pkg/ebpf"
+	"github.com/cilium/cilium/pkg/maps/lxcmap"
+	"github.com/cilium/cilium/pkg/option"
+	"github.com/cilium/cilium/pkg/time"
+)
+
+const (
+	MapName = "cilium_throttle"
+	// Flow aggregate is per Pod, so same size as Endpoint map.
+	MapSize = lxcmap.MaxEntries
+
+	// DefaultDropHorizon represents the maximum allowed departure
+	// time delta in the future. Given that applications can set SO_TXTIME
+	// from user space, this is a limit to prevent buggy applications
+	// from filling the FQ qdisc.
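+	// Editor's note: with EDT-based rate limiting each packet is assigned an
+	// earliest departure timestamp; packets whose timestamp lies further in
+	// the future than this horizon are dropped instead of being queued.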
+	DefaultDropHorizon = 2 * time.Second
+)
+
+type EdtId struct {
+	Id uint64 `align:"id"`
+}
+
+func (k *EdtId) String() string  { return fmt.Sprintf("%d", int(k.Id)) }
+func (k *EdtId) New() bpf.MapKey { return &EdtId{} }
+
+type EdtInfo struct {
+	Bps             uint64    `align:"bps"`
+	TimeLast        uint64    `align:"t_last"`
+	TimeHorizonDrop uint64    `align:"t_horizon_drop"`
+	Pad             [4]uint64 `align:"pad"`
+}
+
+func (v *EdtInfo) String() string    { return fmt.Sprintf("%d", int(v.Bps)) }
+func (v *EdtInfo) New() bpf.MapValue { return &EdtInfo{} }
+
+var (
+	throttleMap     *bpf.Map
+	throttleMapInit = &sync.Once{}
+)
+
+func ThrottleMap() *bpf.Map {
+	throttleMapInit.Do(func() {
+		throttleMap = bpf.NewMap(
+			MapName,
+			ebpf.Hash,
+			&EdtId{},
+			&EdtInfo{},
+			MapSize,
+			bpf.BPF_F_NO_PREALLOC,
+		).WithCache().WithPressureMetric().
+			WithEvents(option.Config.GetEventBufferConfig(MapName))
+	})
+
+	return throttleMap
+}
+
+func Update(Id uint16, Bps uint64) error {
+	return ThrottleMap().Update(
+		&EdtId{Id: uint64(Id)},
+		&EdtInfo{Bps: Bps, TimeHorizonDrop: uint64(DefaultDropHorizon)})
+}
+
+func Delete(Id uint16) error {
+	return ThrottleMap().Delete(
+		&EdtId{Id: uint64(Id)})
+}
+
+func SilentDelete(Id uint16) error {
+	_, err := ThrottleMap().SilentDelete(
+		&EdtId{Id: uint64(Id)})
+
+	return err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/maps/bwmap/doc.go b/vendor/github.com/cilium/cilium/pkg/maps/bwmap/doc.go
new file mode 100644
index 0000000000..b15be313f0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/maps/bwmap/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package bwmap represents the BPF map used to enforce Pod bandwidth
+// limitations via EDT (Earliest Departure Time) + BPF.
+// +groupName=maps
+package bwmap
diff --git a/vendor/github.com/cilium/cilium/pkg/maps/lxcmap/doc.go b/vendor/github.com/cilium/cilium/pkg/maps/lxcmap/doc.go
new file mode 100644
index 0000000000..01af96c4ad
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/maps/lxcmap/doc.go
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package lxcmap represents the endpoints BPF map in the BPF programs. It is
+// implemented as a hash table containing an entry for all local endpoints.
+// The hashtable can be accessed through the key EndpointKey, which points
+// to the value EndpointInfo.
+// +groupName=maps
+package lxcmap
diff --git a/vendor/github.com/cilium/cilium/pkg/maps/lxcmap/lxcmap.go b/vendor/github.com/cilium/cilium/pkg/maps/lxcmap/lxcmap.go
new file mode 100644
index 0000000000..cd20a75255
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/maps/lxcmap/lxcmap.go
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package lxcmap
+
+import (
+	"fmt"
+	"net"
+	"net/netip"
+	"sync"
+
+	"github.com/cilium/ebpf"
+
+	"github.com/cilium/cilium/pkg/bpf"
+	"github.com/cilium/cilium/pkg/identity"
+	"github.com/cilium/cilium/pkg/mac"
+	"github.com/cilium/cilium/pkg/option"
+)
+
+const (
+	MapName = "cilium_lxc"
+
+	// MaxEntries represents the maximum number of endpoints in the map
+	MaxEntries = 65535
+
+	// PortMapMax represents the maximum number of port mappings per container.
+ PortMapMax = 16 +) + +var ( + // LXCMap represents the BPF map for endpoints + lxcMap *bpf.Map + lxcMapOnce sync.Once +) + +func LXCMap() *bpf.Map { + lxcMapOnce.Do(func() { + lxcMap = bpf.NewMap(MapName, + ebpf.Hash, + &EndpointKey{}, + &EndpointInfo{}, + MaxEntries, + 0, + ).WithCache().WithPressureMetric(). + WithEvents(option.Config.GetEventBufferConfig(MapName)) + }) + return lxcMap +} + +const ( + // EndpointFlagHost indicates that this endpoint represents the host + EndpointFlagHost = 1 +) + +// EndpointFrontend is the interface to implement for an object to synchronize +// with the endpoint BPF map. +type EndpointFrontend interface { + LXCMac() mac.MAC + GetNodeMAC() mac.MAC + GetIfIndex() int + GetID() uint64 + IPv4Address() netip.Addr + IPv6Address() netip.Addr + GetIdentity() identity.NumericIdentity +} + +// GetBPFKeys returns all keys which should represent this endpoint in the BPF +// endpoints map +func GetBPFKeys(e EndpointFrontend) []*EndpointKey { + keys := []*EndpointKey{} + if e.IPv6Address().IsValid() { + keys = append(keys, NewEndpointKey(e.IPv6Address().AsSlice())) + } + + if e.IPv4Address().IsValid() { + keys = append(keys, NewEndpointKey(e.IPv4Address().AsSlice())) + } + + return keys +} + +// GetBPFValue returns the value which should represent this endpoint in the +// BPF endpoints map +// Must only be called if init() succeeded. +func GetBPFValue(e EndpointFrontend) (*EndpointInfo, error) { + mac, err := e.LXCMac().Uint64() + if err != nil { + return nil, fmt.Errorf("invalid LXC MAC: %v", err) + } + + nodeMAC, err := e.GetNodeMAC().Uint64() + if err != nil { + return nil, fmt.Errorf("invalid node MAC: %v", err) + } + + info := &EndpointInfo{ + IfIndex: uint32(e.GetIfIndex()), + // Store security identity in network byte order so it can be + // written into the packet without an additional byte order + // conversion. + LxcID: uint16(e.GetID()), + MAC: mac, + NodeMAC: nodeMAC, + SecID: e.GetIdentity().Uint32(), + } + + return info, nil + +} + +type pad3uint32 [3]uint32 + +// EndpointInfo represents the value of the endpoints BPF map. +// +// Must be in sync with struct endpoint_info in +type EndpointInfo struct { + IfIndex uint32 `align:"ifindex"` + Unused uint16 `align:"unused"` + LxcID uint16 `align:"lxc_id"` + Flags uint32 `align:"flags"` + // go alignment + _ uint32 + MAC mac.Uint64MAC `align:"mac"` + NodeMAC mac.Uint64MAC `align:"node_mac"` + SecID uint32 `align:"sec_id"` + Pad pad3uint32 `align:"pad"` +} + +type EndpointKey struct { + bpf.EndpointKey +} + +// NewEndpointKey returns an EndpointKey based on the provided IP address. The +// address family is automatically detected +func NewEndpointKey(ip net.IP) *EndpointKey { + return &EndpointKey{ + EndpointKey: bpf.NewEndpointKey(ip, 0), + } +} + +func (k *EndpointKey) New() bpf.MapKey { return &EndpointKey{} } + +// IsHost returns true if the EndpointInfo represents a host IP +func (v *EndpointInfo) IsHost() bool { + return v.Flags&EndpointFlagHost != 0 +} + +// String returns the human readable representation of an EndpointInfo +func (v *EndpointInfo) String() string { + if v.Flags&EndpointFlagHost != 0 { + return "(localhost)" + } + + return fmt.Sprintf("id=%-5d sec_id=%-5d flags=0x%04X ifindex=%-3d mac=%s nodemac=%s", + v.LxcID, + v.SecID, + v.Flags, + v.IfIndex, + v.MAC, + v.NodeMAC, + ) +} + +func (v *EndpointInfo) New() bpf.MapValue { return &EndpointInfo{} } + +// WriteEndpoint updates the BPF map with the endpoint information and links +// the endpoint information to all keys provided. 
+func WriteEndpoint(f EndpointFrontend) error { + info, err := GetBPFValue(f) + if err != nil { + return err + } + + // FIXME: Revert on failure + for _, v := range GetBPFKeys(f) { + if err := LXCMap().Update(v, info); err != nil { + return err + } + } + + return nil +} + +// AddHostEntry adds a special endpoint which represents the local host +func AddHostEntry(ip net.IP) error { + key := NewEndpointKey(ip) + ep := &EndpointInfo{Flags: EndpointFlagHost} + return LXCMap().Update(key, ep) +} + +// SyncHostEntry checks if a host entry exists in the lxcmap and adds one if needed. +// Returns boolean indicating if a new entry was added and an error. +func SyncHostEntry(ip net.IP) (bool, error) { + key := NewEndpointKey(ip) + value, err := LXCMap().Lookup(key) + if err != nil || value.(*EndpointInfo).Flags&EndpointFlagHost == 0 { + err = AddHostEntry(ip) + if err == nil { + return true, nil + } + } + return false, err +} + +// DeleteEntry deletes a single map entry +func DeleteEntry(ip net.IP) error { + return LXCMap().Delete(NewEndpointKey(ip)) +} + +// DeleteElement deletes the endpoint using all keys which represent the +// endpoint. It returns the number of errors encountered during deletion. +func DeleteElement(f EndpointFrontend) []error { + var errors []error + for _, k := range GetBPFKeys(f) { + if err := LXCMap().Delete(k); err != nil { + errors = append(errors, fmt.Errorf("Unable to delete key %v from %s: %s", k, bpf.MapPath(MapName), err)) + } + } + + return errors +} + +// DumpToMap dumps the contents of the lxcmap into a map and returns it +func DumpToMap() (map[string]EndpointInfo, error) { + m := map[string]EndpointInfo{} + callback := func(key bpf.MapKey, value bpf.MapValue) { + if info, ok := value.(*EndpointInfo); ok { + if endpointKey, ok := key.(*EndpointKey); ok { + m[endpointKey.ToIP().String()] = *info + } + } + } + + if err := LXCMap().DumpWithCallback(callback); err != nil { + return nil, fmt.Errorf("unable to read BPF endpoint list: %s", err) + } + + return m, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/mountinfo/mountinfo.go b/vendor/github.com/cilium/cilium/pkg/mountinfo/mountinfo.go new file mode 100644 index 0000000000..62711a97ce --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/mountinfo/mountinfo.go @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package mountinfo + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +const ( + mountInfoFilepath = "/proc/self/mountinfo" +) + +// MountInfo is a struct representing information from /proc/pid/mountinfo. More +// information about file syntax: +// https://www.kernel.org/doc/Documentation/filesystems/proc.txt +type MountInfo struct { + MountID int64 + ParentID int64 + StDev string + Root string + MountPoint string + MountOptions string + OptionalFields []string + FilesystemType string + MountSource string + SuperOptions string +} + +// parseMountInfoFile returns a slice of *MountInfo with information parsed from +// the given reader +func parseMountInfoFile(r io.Reader) ([]*MountInfo, error) { + var result []*MountInfo + + scanner := bufio.NewScanner(r) + scanner.Split(bufio.ScanLines) + + for scanner.Scan() { + mountInfoRaw := scanner.Text() + + // Optional fields (which are on the 7th position) are separated + // from the rest of fields by "-" character. The number of + // optional fields can be greater or equal to 1. 
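+		// A typical mountinfo line looks like (example from the kernel's
+		// Documentation/filesystems/proc.txt):
+		//
+		//   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+		//
+		// where "master:1" is an optional field and " - " separates the
+		// two halves of the entry.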
+		mountInfoSeparated := strings.Split(mountInfoRaw, " - ")
+		if len(mountInfoSeparated) != 2 {
+			return nil, fmt.Errorf("invalid mountinfo entry which has more than one '-' separator: %s", mountInfoRaw)
+		}
+
+		// Extract fields from both sides of mountinfo
+		mountInfoLeft := strings.Split(mountInfoSeparated[0], " ")
+		mountInfoRight := strings.Split(mountInfoSeparated[1], " ")
+
+		// Before '-' separator there should be 6 fields and an unknown
+		// number of optional fields
+		if len(mountInfoLeft) < 6 {
+			return nil, fmt.Errorf("invalid mountinfo entry: %s", mountInfoRaw)
+		}
+		// After '-' separator there should be 3 fields
+		if len(mountInfoRight) != 3 {
+			return nil, fmt.Errorf("invalid mountinfo entry: %s", mountInfoRaw)
+		}
+
+		mountID, err := strconv.ParseInt(mountInfoLeft[0], 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		parentID, err := strconv.ParseInt(mountInfoLeft[1], 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		// Extract optional fields, which start from the 7th position
+		var optionalFields []string
+		for i := 6; i < len(mountInfoLeft); i++ {
+			optionalFields = append(optionalFields, mountInfoLeft[i])
+		}
+
+		result = append(result, &MountInfo{
+			MountID:        mountID,
+			ParentID:       parentID,
+			StDev:          mountInfoLeft[2],
+			Root:           mountInfoLeft[3],
+			MountPoint:     mountInfoLeft[4],
+			MountOptions:   mountInfoLeft[5],
+			OptionalFields: optionalFields,
+			FilesystemType: mountInfoRight[0],
+			MountSource:    mountInfoRight[1],
+			SuperOptions:   mountInfoRight[2],
+		})
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// GetMountInfo returns a slice of *MountInfo with information parsed from
+// /proc/self/mountinfo
+func GetMountInfo() ([]*MountInfo, error) {
+	fMounts, err := os.Open(mountInfoFilepath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open mount information at %s: %s", mountInfoFilepath, err)
+	}
+	defer fMounts.Close()
+
+	return parseMountInfoFile(fMounts)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/mountinfo/mountinfo_linux.go b/vendor/github.com/cilium/cilium/pkg/mountinfo/mountinfo_linux.go
new file mode 100644
index 0000000000..638b9e4465
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/mountinfo/mountinfo_linux.go
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package mountinfo
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// FilesystemType superblock magic numbers for filesystems,
+	// to be used for IsMountFS.
+	FilesystemTypeBPFFS   = unix.BPF_FS_MAGIC
+	FilesystemTypeCgroup2 = unix.CGROUP2_SUPER_MAGIC
+)
+
+// IsMountFS returns two boolean values, checking
+// - whether the path is a mount point;
+// - if yes, whether its filesystem type is mntType.
+//
+// Note that this function cannot detect bind mounts,
+// and does not work properly when path="/".
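+//
+// Illustrative usage (editor's addition):
+//
+//	mounted, matched, err := IsMountFS(FilesystemTypeBPFFS, "/sys/fs/bpf")
+//	// mounted: whether /sys/fs/bpf is a mount point
+//	// matched: whether it is backed by a BPF filesystem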
+func IsMountFS(mntType int64, path string) (bool, bool, error) { + var st, pst unix.Stat_t + + err := unix.Lstat(path, &st) + if err != nil { + if errors.Is(err, unix.ENOENT) { + // non-existent path can't be a mount point + return false, false, nil + } + return false, false, &os.PathError{Op: "lstat", Path: path, Err: err} + } + + parent := filepath.Dir(path) + err = unix.Lstat(parent, &pst) + if err != nil { + return false, false, &os.PathError{Op: "lstat", Path: parent, Err: err} + } + if st.Dev == pst.Dev { + // parent has the same dev -- not a mount point + return false, false, nil + } + + // Check the fstype + fst := unix.Statfs_t{} + err = unix.Statfs(path, &fst) + if err != nil { + return true, false, &os.PathError{Op: "statfs", Path: path, Err: err} + } + + return true, int64(fst.Type) == mntType, nil + +} diff --git a/vendor/github.com/cilium/cilium/pkg/mountinfo/mountinfo_unspecified.go b/vendor/github.com/cilium/cilium/pkg/mountinfo/mountinfo_unspecified.go new file mode 100644 index 0000000000..a00a96a1df --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/mountinfo/mountinfo_unspecified.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build !linux + +package mountinfo + +import "errors" + +const ( + // Dummy FilesystemType superblock magic numbers for filesystems, + // to be used for IsMountFS. + FilesystemTypeBPFFS = 0 +) + +// IsMountFS returns two boolean values, checking +// - whether the path is a mount point; +// - if yes, whether its filesystem type is mntType. +// +// Note that this function can not detect bind mounts, +// and is not working properly when path="/". +func IsMountFS(mntType int64, path string) (bool, bool, error) { + return false, false, errors.New("not implemented") +} diff --git a/vendor/github.com/cilium/cilium/pkg/node/address.go b/vendor/github.com/cilium/cilium/pkg/node/address.go new file mode 100644 index 0000000000..b61f12634d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/node/address.go @@ -0,0 +1,631 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package node + +import ( + "bufio" + "context" + "fmt" + "net" + "os" + "strconv" + "strings" + + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/api/v1/models" + "github.com/cilium/cilium/pkg/byteorder" + "github.com/cilium/cilium/pkg/cidr" + "github.com/cilium/cilium/pkg/common" + "github.com/cilium/cilium/pkg/defaults" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/option" + wgTypes "github.com/cilium/cilium/pkg/wireguard/types" +) + +const preferPublicIP bool = true + +var ( + // addrsMu protects addrs. Outside the addresses struct + // so that we can Uninitialize() without linter complaining + // about lock copying. + addrsMu lock.RWMutex + addrs addresses + + // localNode holds the current state of the local "types.Node". + // This is defined here until all uses of the getters and + // setters in this file have been migrated to use LocalNodeStore + // directly. + // Initialized to proper instance via an invoke function in LocalNodeStoreCell, + // or temporarily in tests with 'WithTestLocalNodeStore'. 
+ localNode *LocalNodeStore +) + +func getLocalNode() LocalNode { + n, err := localNode.Get(context.TODO()) + if err != nil { + // Only expecting errors if we're called after LocalNodeStore has stopped, e.g. + // we have a component that uses the legacy getters and setters here and does + // not depend on LocalNodeStore. + log.WithError(err).Fatal("getLocalNode: unexpected error") + } + return n +} + +type addresses struct { + ipv4Loopback net.IP + ipv4MasqAddrs map[string]net.IP // iface name => ip addr + ipv6MasqAddrs map[string]net.IP // iface name => ip addr + routerInfo RouterInfo +} + +type RouterInfo interface { + GetIPv4CIDRs() []net.IPNet +} + +func makeIPv6HostIP() net.IP { + ipstr := "fc00::10CA:1" + ip := net.ParseIP(ipstr) + if ip == nil { + log.WithField(logfields.IPAddr, ipstr).Fatal("Unable to parse IP") + } + + return ip +} + +// InitDefaultPrefix initializes the node address and allocation prefixes with +// default values derived from the system. device can be set to the primary +// network device of the system in which case the first address with global +// scope will be regarded as the system's node address. +func InitDefaultPrefix(device string) { + localNode.Update(func(n *LocalNode) { + SetDefaultPrefix(option.Config, device, n) + }) +} + +func SetDefaultPrefix(cfg *option.DaemonConfig, device string, node *LocalNode) { + if cfg.EnableIPv4 { + isIPv6 := false + + ip, err := firstGlobalV4Addr(device, node.GetCiliumInternalIP(isIPv6), preferPublicIP) + if err != nil { + return + } + + if node.GetNodeIP(isIPv6) == nil { + node.SetNodeInternalIP(ip) + } + + ipv4range := node.IPv4AllocCIDR + ipv6range := node.IPv6AllocCIDR + + if ipv4range == nil { + // If the IPv6AllocRange is not nil then the IPv4 allocation should be + // derived from the IPv6AllocRange. + // vvvv vvvv + // FD00:0000:0000:0000:0000:0000:0000:0000 + if ipv6range != nil { + ip = net.IPv4( + ipv6range.IP[8], + ipv6range.IP[9], + ipv6range.IP[10], + ipv6range.IP[11]) + } + v4range := fmt.Sprintf(defaults.DefaultIPv4Prefix+"/%d", + ip.To4()[3], defaults.DefaultIPv4PrefixLen) + _, ip4net, err := net.ParseCIDR(v4range) + if err != nil { + log.WithError(err).WithField(logfields.V4Prefix, v4range).Panic("BUG: Invalid default IPv4 prefix") + } + + node.IPv4AllocCIDR = cidr.NewCIDR(ip4net) + log.WithField(logfields.V4Prefix, node.IPv4AllocCIDR).Info("Using autogenerated IPv4 allocation range") + } + } + + if cfg.EnableIPv6 { + isIPv6 := true + ipv4range := node.IPv4AllocCIDR + ipv6range := node.IPv6AllocCIDR + + if node.GetNodeIP(isIPv6) == nil { + // Find a IPv6 node address first + addr, _ := firstGlobalV6Addr(device, node.GetCiliumInternalIP(isIPv6), preferPublicIP) + if addr == nil { + addr = makeIPv6HostIP() + } + node.SetNodeInternalIP(addr) + } + + if ipv6range == nil && ipv4range != nil { + // The IPv6 allocation should be derived from the IPv4 allocation. + ip := ipv4range.IP + v6range := fmt.Sprintf("%s%02x%02x:%02x%02x:0:0/%d", + cfg.IPv6ClusterAllocCIDRBase, ip[0], ip[1], ip[2], ip[3], 96) + + _, ip6net, err := net.ParseCIDR(v6range) + if err != nil { + log.WithError(err).WithField(logfields.V6Prefix, v6range).Panic("BUG: Invalid default IPv6 prefix") + } + + node.IPv6AllocCIDR = cidr.NewCIDR(ip6net) + log.WithField(logfields.V6Prefix, node.IPv6AllocCIDR).Info("Using autogenerated IPv6 allocation range") + } + } +} + +// InitBPFMasqueradeAddrs initializes BPF masquerade addrs for the given devices. 
+func InitBPFMasqueradeAddrs(devices []string) error { + addrsMu.Lock() + defer addrsMu.Unlock() + + masqIPFromDevice := option.Config.DeriveMasqIPAddrFromDevice + + if option.Config.EnableIPv4 { + addrs.ipv4MasqAddrs = make(map[string]net.IP, len(devices)) + err := initMasqueradeV4Addrs(addrs.ipv4MasqAddrs, masqIPFromDevice, devices, logfields.IPv4) + if err != nil { + return err + } + } + if option.Config.EnableIPv6 { + addrs.ipv6MasqAddrs = make(map[string]net.IP, len(devices)) + return initMasqueradeV6Addrs(addrs.ipv6MasqAddrs, masqIPFromDevice, devices, logfields.IPv6) + } + + return nil +} + +func clone(ip net.IP) net.IP { + if ip == nil { + return nil + } + dup := make(net.IP, len(ip)) + copy(dup, ip) + return dup +} + +// GetIPv4Loopback returns the loopback IPv4 address of this node. +func GetIPv4Loopback() net.IP { + addrsMu.RLock() + defer addrsMu.RUnlock() + return clone(addrs.ipv4Loopback) +} + +// SetIPv4Loopback sets the loopback IPv4 address of this node. +func SetIPv4Loopback(ip net.IP) { + addrsMu.Lock() + addrs.ipv4Loopback = clone(ip) + addrsMu.Unlock() +} + +// GetIPv4AllocRange returns the IPv4 allocation prefix of this node +func GetIPv4AllocRange() *cidr.CIDR { + return getLocalNode().IPv4AllocCIDR.DeepCopy() +} + +// GetIPv6AllocRange returns the IPv6 allocation prefix of this node +func GetIPv6AllocRange() *cidr.CIDR { + return getLocalNode().IPv6AllocCIDR.DeepCopy() +} + +// GetIPv4 returns one of the IPv4 node address available with the following +// priority: +// - NodeInternalIP +// - NodeExternalIP +// - other IP address type. +// It must be reachable on the network. +func GetIPv4() net.IP { + n := getLocalNode() + return clone(n.GetNodeIP(false)) +} + +// GetInternalIPv4 returns node internal ipv4 address else return nil. +func GetInternalIPv4() net.IP { + n := getLocalNode() + return clone(n.GetNodeInternalIPv4()) +} + +// GetInternalIPv6 returns node internal ipv6 address else return nil. +func GetInternalIPv6() net.IP { + n := getLocalNode() + return clone(n.GetNodeInternalIPv6()) +} + +// GetCiliumEndpointNodeIP is the node IP that will be referenced by CiliumEndpoints with endpoints +// running on this node. +func GetCiliumEndpointNodeIP() string { + if option.Config.EnableIPv4 { + return GetIPv4().String() + } + return GetIPv6().String() +} + +// SetInternalIPv4Router sets the cilium internal IPv4 node address, it is allocated from the node prefix. +// This must not be conflated with k8s internal IP as this IP address is only relevant within the +// Cilium-managed network (this means within the node for direct routing mode and on the overlay +// for tunnel mode). +func SetInternalIPv4Router(ip net.IP) { + localNode.Update(func(n *LocalNode) { + n.SetCiliumInternalIP(ip) + }) +} + +// GetInternalIPv4Router returns the cilium internal IPv4 node address. This must not be conflated with +// k8s internal IP as this IP address is only relevant within the Cilium-managed network (this means +// within the node for direct routing mode and on the overlay for tunnel mode). +func GetInternalIPv4Router() net.IP { + n := getLocalNode() + return n.GetCiliumInternalIP(false) +} + +// GetK8sExternalIPv4 returns the external IPv4 node address. It must be a public IP that is routable +// on the network as well as the internet. It can return nil if no External IPv4 address is assigned. +func GetK8sExternalIPv4() net.IP { + n := getLocalNode() + return n.GetExternalIP(false) +} + +// GetRouterInfo returns additional information for the router, the cilium_host interface. 
+func GetRouterInfo() RouterInfo { + addrsMu.RLock() + defer addrsMu.RUnlock() + return addrs.routerInfo +} + +// SetRouterInfo sets additional information for the router, the cilium_host interface. +func SetRouterInfo(info RouterInfo) { + addrsMu.Lock() + addrs.routerInfo = info + addrsMu.Unlock() +} + +// GetHostMasqueradeIPv4 returns the IPv4 address to be used for masquerading +// any traffic that is being forwarded from the host into the Cilium cluster. +func GetHostMasqueradeIPv4() net.IP { + return GetInternalIPv4Router() +} + +// SetIPv4AllocRange sets the IPv4 address pool to use when allocating +// addresses for local endpoints +func SetIPv4AllocRange(net *cidr.CIDR) { + localNode.Update(func(n *LocalNode) { + n.IPv4AllocCIDR = net + }) +} + +// GetMasqIPv4AddrsWithDevices returns the map iface => BPF masquerade IPv4. +func GetMasqIPv4AddrsWithDevices() map[string]net.IP { + addrsMu.RLock() + defer addrsMu.RUnlock() + return copyStringToNetIPMap(addrs.ipv4MasqAddrs) +} + +// GetMasqIPv6AddrsWithDevices returns the map iface => BPF masquerade IPv6. +func GetMasqIPv6AddrsWithDevices() map[string]net.IP { + addrsMu.RLock() + defer addrsMu.RUnlock() + return copyStringToNetIPMap(addrs.ipv6MasqAddrs) +} + +// SetIPv6NodeRange sets the IPv6 address pool to be used on this node +func SetIPv6NodeRange(net *cidr.CIDR) { + localNode.Update(func(n *LocalNode) { + n.IPv6AllocCIDR = net + }) +} + +// AutoComplete completes the parts of addressing that can be auto derived +func AutoComplete() error { + InitDefaultPrefix(option.Config.DirectRoutingDevice) + + if option.Config.EnableIPv6 && GetIPv6AllocRange() == nil { + return fmt.Errorf("IPv6 allocation CIDR is not configured. Please specify --%s", option.IPv6Range) + } + + if option.Config.EnableIPv4 && GetIPv4AllocRange() == nil { + return fmt.Errorf("IPv4 allocation CIDR is not configured. Please specify --%s", option.IPv4Range) + } + + return nil +} + +// ValidatePostInit validates the entire addressing setup and completes it as +// required +func ValidatePostInit() error { + if option.Config.EnableIPv4 || option.Config.TunnelingEnabled() { + if GetIPv4() == nil { + return fmt.Errorf("external IPv4 node address could not be derived, please configure via --ipv4-node") + } + } + + if option.Config.EnableIPv4 && GetInternalIPv4Router() == nil { + return fmt.Errorf("BUG: Internal IPv4 node address was not configured") + } + + return nil +} + +// GetIPv6 returns the IPv6 address of the node +func GetIPv6() net.IP { + n := getLocalNode() + return clone(n.GetNodeIP(true)) +} + +// GetHostMasqueradeIPv6 returns the IPv6 address to be used for masquerading +// any traffic that is being forwarded from the host into the Cilium cluster. +func GetHostMasqueradeIPv6() net.IP { + return GetIPv6Router() +} + +// GetIPv6Router returns the IPv6 address of the router, e.g. address +// of cilium_host device. +func GetIPv6Router() net.IP { + n := getLocalNode() + return clone(n.GetCiliumInternalIP(true)) +} + +// SetIPv6Router sets the IPv6 address of the router address, e.g. address +// of cilium_host device. +func SetIPv6Router(ip net.IP) { + localNode.Update(func(n *LocalNode) { + n.SetCiliumInternalIP(ip) + }) +} + +// GetK8sExternalIPv6 returns the external IPv6 node address. +func GetK8sExternalIPv6() net.IP { + n := getLocalNode() + return clone(n.GetExternalIP(false)) +} + +// GetNodeAddressing returns the NodeAddressing model for the local IPs. 
+func GetNodeAddressing() *models.NodeAddressing {
+	a := &models.NodeAddressing{}
+
+	if option.Config.EnableIPv6 {
+		a.IPV6 = &models.NodeAddressingElement{
+			Enabled:    option.Config.EnableIPv6,
+			IP:         GetIPv6Router().String(),
+			AllocRange: GetIPv6AllocRange().String(),
+		}
+	}
+
+	if option.Config.EnableIPv4 {
+		a.IPV4 = &models.NodeAddressingElement{
+			Enabled:    option.Config.EnableIPv4,
+			IP:         GetInternalIPv4Router().String(),
+			AllocRange: GetIPv4AllocRange().String(),
+		}
+	}
+
+	return a
+}
+
+func getCiliumHostIPsFromFile(nodeConfig string) (ipv4GW, ipv6Router net.IP) {
+	// ipLen is the length of the IP address stored in the node_config.h;
+	// it has the same length for both IPv4 and IPv6.
+	const ipLen = net.IPv6len
+
+	var hasIPv4, hasIPv6 bool
+	f, err := os.Open(nodeConfig)
+	switch {
+	case err != nil:
+	default:
+		defer f.Close()
+		scanner := bufio.NewScanner(f)
+		for scanner.Scan() {
+			txt := scanner.Text()
+			switch {
+			case !hasIPv6 && strings.Contains(txt, defaults.RestoreV6Addr):
+				defineLine := strings.Split(txt, defaults.RestoreV6Addr)
+				if len(defineLine) != 2 {
+					continue
+				}
+				ipv6 := common.C2GoArray(defineLine[1])
+				if len(ipv6) != ipLen {
+					continue
+				}
+				ipv6Router = net.IP(ipv6)
+				hasIPv6 = true
+			case !hasIPv4 && strings.Contains(txt, defaults.RestoreV4Addr):
+				defineLine := strings.Split(txt, defaults.RestoreV4Addr)
+				if len(defineLine) != 2 {
+					continue
+				}
+				ipv4 := common.C2GoArray(defineLine[1])
+				if len(ipv4) != ipLen {
+					continue
+				}
+				ipv4GW = net.IP(ipv4)
+				hasIPv4 = true
+
+			// Legacy cases based on the header defines:
+			case !hasIPv4 && strings.Contains(txt, "IPV4_GATEWAY"):
+				// #define IPV4_GATEWAY 0xee1c000a
+				defineLine := strings.Split(txt, " ")
+				if len(defineLine) != 3 {
+					continue
+				}
+				ipv4GWHex := strings.TrimPrefix(defineLine[2], "0x")
+				ipv4GWUint64, err := strconv.ParseUint(ipv4GWHex, 16, 32)
+				if err != nil {
+					continue
+				}
+				if ipv4GWUint64 != 0 {
+					bs := make([]byte, net.IPv4len)
+					byteorder.Native.PutUint32(bs, uint32(ipv4GWUint64))
+					ipv4GW = net.IPv4(bs[0], bs[1], bs[2], bs[3])
+					hasIPv4 = true
+				}
+			case !hasIPv6 && strings.Contains(txt, " ROUTER_IP "):
+				// #define ROUTER_IP 0xf0, 0xd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8a, 0xd6
+				defineLine := strings.Split(txt, " ROUTER_IP ")
+				if len(defineLine) != 2 {
+					continue
+				}
+				ipv6 := common.C2GoArray(defineLine[1])
+				if len(ipv6) != net.IPv6len {
+					continue
+				}
+				ipv6Router = net.IP(ipv6)
+				hasIPv6 = true
+			}
+		}
+	}
+	return ipv4GW, ipv6Router
+}
+
+// ExtractCiliumHostIPFromFS returns the Cilium IPv4 gateway and router IPv6 address from
+// the node_config.h file if it is present, or else derives them from the
+// defaults.HostDevice interface, from which only the IPv4 address can be derived.
+func ExtractCiliumHostIPFromFS() (ipv4GW, ipv6Router net.IP) {
+	if !option.Config.EnableHostIPRestore {
+		return nil, nil
+	}
+
+	nodeConfig := option.Config.GetNodeConfigPath()
+	ipv4GW, ipv6Router = getCiliumHostIPsFromFile(nodeConfig)
+	if ipv4GW != nil || ipv6Router != nil {
+		log.WithFields(logrus.Fields{
+			"ipv4": ipv4GW,
+			"ipv6": ipv6Router,
+			"file": nodeConfig,
+		}).Info("Restored router address from node_config")
+		return ipv4GW, ipv6Router
+	}
+	return getCiliumHostIPsFromNetDev(defaults.HostDevice)
+}
+
+// SetIPsecKeyIdentity sets the IPsec key identity, an opaque value used to
+// identify encryption keys used on the node.
+func SetIPsecKeyIdentity(id uint8) {
+	localNode.Update(func(n *LocalNode) {
+		n.EncryptionKey = id
+	})
+}
+
+// GetK8sNodeIP returns the k8s Node IP address.
+func GetK8sNodeIP() net.IP {
+	n := getLocalNode()
+	return n.GetK8sNodeIP()
+}
+
+func GetWireguardPubKey() string {
+	return getLocalNode().WireguardPubKey
+}
+
+func GetOptOutNodeEncryption() bool {
+	return getLocalNode().OptOutNodeEncryption
+}
+
+// SetEndpointHealthIPv4 sets the IPv4 cilium-health endpoint address.
+func SetEndpointHealthIPv4(ip net.IP) {
+	localNode.Update(func(n *LocalNode) {
+		n.IPv4HealthIP = ip
+	})
+}
+
+// GetEndpointHealthIPv4 returns the IPv4 cilium-health endpoint address.
+func GetEndpointHealthIPv4() net.IP {
+	return getLocalNode().IPv4HealthIP
+}
+
+// SetEndpointHealthIPv6 sets the IPv6 cilium-health endpoint address.
+func SetEndpointHealthIPv6(ip net.IP) {
+	localNode.Update(func(n *LocalNode) {
+		n.IPv6HealthIP = ip
+	})
+}
+
+// GetEndpointHealthIPv6 returns the IPv6 cilium-health endpoint address.
+func GetEndpointHealthIPv6() net.IP {
+	return getLocalNode().IPv6HealthIP
+}
+
+// SetIngressIPv4 sets the local IPv4 source address for Cilium Ingress.
+func SetIngressIPv4(ip net.IP) {
+	localNode.Update(func(n *LocalNode) {
+		n.IPv4IngressIP = ip
+	})
+}
+
+// GetIngressIPv4 returns the local IPv4 source address for Cilium Ingress.
+func GetIngressIPv4() net.IP {
+	return getLocalNode().IPv4IngressIP
+}
+
+// SetIngressIPv6 sets the local IPv6 source address for Cilium Ingress.
+func SetIngressIPv6(ip net.IP) {
+	localNode.Update(func(n *LocalNode) {
+		n.IPv6IngressIP = ip
+	})
+}
+
+// GetIngressIPv6 returns the local IPv6 source address for Cilium Ingress.
+func GetIngressIPv6() net.IP {
+	return getLocalNode().IPv6IngressIP
+}
+
+// GetEndpointEncryptKeyIndex returns the encryption key value for an endpoint
+// owned by the local node.
+// With IPSec encryption, this is the ID of the currently loaded key.
+// With WireGuard, this returns a non-zero static value.
+// Note that the key index returned by this function is only valid for _endpoints_
+// of the local node. If you want to obtain the key index of the local node itself,
+// access the `EncryptionKey` field via the LocalNodeStore.
+func GetEndpointEncryptKeyIndex() uint8 {
+	switch {
+	case option.Config.EnableIPSec:
+		return getLocalNode().EncryptionKey
+	case option.Config.EnableWireguard:
+		return wgTypes.StaticEncryptKey
+	}
+	return 0
+}
+
+func copyStringToNetIPMap(in map[string]net.IP) map[string]net.IP {
+	out := make(map[string]net.IP, len(in))
+	for iface, ip := range in {
+		dup := make(net.IP, len(ip))
+		copy(dup, ip)
+		out[iface] = dup
+	}
+	return out
+}
+
+// WithTestLocalNodeStore sets the 'localNode' to a temporary instance and
+// runs the given test. Afterwards the 'localNode' is restored to nil.
+// This is a temporary workaround for tests until the LocalNodeStoreCell can be
+// used.
+func WithTestLocalNodeStore(runTest func()) {
+	SetTestLocalNodeStore()
+	defer UnsetTestLocalNodeStore()
+	runTest()
+}
+
+func SetTestLocalNodeStore() {
+	if localNode != nil {
+		panic("localNode already set")
+	}
+
+	// Set the localNode global variable temporarily so that the legacy getters
+	// and setters can access it.
+	localNode = NewTestLocalNodeStore(LocalNode{})
+}
+
+func UnsetTestLocalNodeStore() {
+	localNode = nil
+}
+
+// UpdateLocalNodeInTest provides access to modifying the local node
+// information from tests that are not yet using hive and the LocalNodeStoreCell.
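+//
+// A minimal sketch of the intended test usage (node name is a hypothetical
+// example):
+//
+//	WithTestLocalNodeStore(func() {
+//		UpdateLocalNodeInTest(func(n *LocalNode) {
+//			n.Name = "test-node"
+//		})
+//	})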
+func UpdateLocalNodeInTest(mod func(n *LocalNode)) { + if localNode == nil { + panic("localNode not set, use node.LocalNodeStoreCell or WithTestLocalNodeStore()?") + } + localNode.Update(mod) +} diff --git a/vendor/github.com/cilium/cilium/pkg/node/address_linux.go b/vendor/github.com/cilium/cilium/pkg/node/address_linux.go new file mode 100644 index 0000000000..b0f66d23e0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/node/address_linux.go @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build !darwin + +package node + +import ( + "fmt" + "net" + "sort" + + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + "golang.org/x/sys/unix" + + "github.com/cilium/cilium/pkg/ip" + "github.com/cilium/cilium/pkg/logging/logfields" +) + +func firstGlobalAddr(intf string, preferredIP net.IP, family int, preferPublic bool) (net.IP, error) { + var link netlink.Link + var ipLen int + var err error + + ipsToExclude := GetExcludedIPs() + linkScopeMax := unix.RT_SCOPE_UNIVERSE + if family == netlink.FAMILY_V4 { + ipLen = 4 + } else { + ipLen = 16 + } + + if intf != "" && intf != "undefined" { + link, err = netlink.LinkByName(intf) + if err != nil { + link = nil + } else { + ipsToExclude = []net.IP{} + } + } + +retryInterface: + addr, err := netlink.AddrList(link, family) + if err != nil { + return nil, err + } + +retryScope: + ipsPublic := []netlink.Addr{} + ipsPrivate := []netlink.Addr{} + hasPreferred := false + + for _, a := range addr { + if a.Scope > linkScopeMax { + continue + } + if ip.ListContainsIP(ipsToExclude, a.IP) { + continue + } + if len(a.IP) < ipLen { + continue + } + isPreferredIP := a.IP.Equal(preferredIP) + if a.Flags&unix.IFA_F_SECONDARY > 0 && !isPreferredIP { + // Skip secondary addresses if they're not the preferredIP + continue + } + + if ip.IsPublicAddr(a.IP) { + ipsPublic = append(ipsPublic, a) + } else { + ipsPrivate = append(ipsPrivate, a) + } + // If the IP is the same as the preferredIP, that + // means that maybe it is restored from node_config.h, + // so if it is present we prefer this one, even if it + // is a secondary address. + if isPreferredIP { + hasPreferred = true + } + } + + if hasPreferred && !preferPublic { + return preferredIP, nil + } + + if len(ipsPublic) != 0 { + if hasPreferred && ip.IsPublicAddr(preferredIP) { + return preferredIP, nil + } + + // Just make sure that we always return the same one and not a + // random one. More info in the issue GH-7637. + sort.SliceStable(ipsPublic, func(i, j int) bool { + return ipsPublic[i].LinkIndex < ipsPublic[j].LinkIndex + }) + + return ipsPublic[0].IP, nil + } + + if len(ipsPrivate) != 0 { + if hasPreferred && !ip.IsPublicAddr(preferredIP) { + return preferredIP, nil + } + + // Same stable order, see above ipsPublic. + sort.SliceStable(ipsPrivate, func(i, j int) bool { + return ipsPrivate[i].LinkIndex < ipsPrivate[j].LinkIndex + }) + + return ipsPrivate[0].IP, nil + } + + // First, if a device is specified, fall back to anything wider + // than link (site, custom, ...) before trying all devices. + if linkScopeMax != unix.RT_SCOPE_SITE { + linkScopeMax = unix.RT_SCOPE_SITE + goto retryScope + } + + // Fall back with retry for all interfaces with full scope again + // (which then goes back to lower scope again for all interfaces + // before we give up completely). 
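+	// At this point the scope has already been widened to RT_SCOPE_SITE for
+	// the requested device, so the only remaining fallback is to drop the
+	// device restriction (link = nil) and rescan all interfaces, starting
+	// from universe scope again.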
+	if link != nil {
+		linkScopeMax = unix.RT_SCOPE_UNIVERSE
+		link = nil
+		goto retryInterface
+	}
+
+	return nil, fmt.Errorf("No address found")
+}
+
+// firstGlobalV4Addr returns the first IPv4 global IP of an interface,
+// where the IPs are sorted in creation order (oldest to newest).
+//
+// All secondary IPs, except the preferredIP, are filtered out.
+//
+// Public IPs are preferred over private ones. When intf is defined only
+// IPs belonging to that interface are considered.
+//
+// If preferredIP is present in the IP list it is returned irrespective of
+// the sort order. However, if preferPublic is true and preferredIP is a
+// private IP, a public IP will be returned if it is assigned to the intf
+//
+// Passing intf and preferredIP will only return preferredIP if it is in
+// the IPs that belong to intf.
+//
+// In all cases, if intf is not found all interfaces are considered.
+//
+// If an intf-specific global address couldn't be found, we retry to find
+// an address with reduced scope (site, custom) on that particular device.
+//
+// If the latter fails as well, we retry on all interfaces beginning with
+// universe scope again (and then falling back to reduced scope).
+//
+// In case none of the above helped, we bail out with error.
+func firstGlobalV4Addr(intf string, preferredIP net.IP, preferPublic bool) (net.IP, error) {
+	return firstGlobalAddr(intf, preferredIP, netlink.FAMILY_V4, preferPublic)
+}
+
+// firstGlobalV6Addr returns first IPv6 global IP of an interface, see
+// firstGlobalV4Addr for more details.
+func firstGlobalV6Addr(intf string, preferredIP net.IP, preferPublic bool) (net.IP, error) {
+	return firstGlobalAddr(intf, preferredIP, netlink.FAMILY_V6, preferPublic)
+}
+
+// getCiliumHostIPsFromNetDev returns the first link-scoped IPv4 address and
+// the first non-link-scoped IPv6 address found on the given device.
+func getCiliumHostIPsFromNetDev(devName string) (ipv4GW, ipv6Router net.IP) {
+	hostDev, err := netlink.LinkByName(devName)
+	if err != nil {
+		return nil, nil
+	}
+	addrs, err := netlink.AddrList(hostDev, netlink.FAMILY_ALL)
+	if err != nil {
+		return nil, nil
+	}
+	for _, addr := range addrs {
+		if addr.IP.To4() != nil {
+			if addr.Scope == int(netlink.SCOPE_LINK) {
+				ipv4GW = addr.IP
+			}
+		} else {
+			if addr.Scope != int(netlink.SCOPE_LINK) {
+				ipv6Router = addr.IP
+			}
+		}
+	}
+
+	if ipv4GW != nil || ipv6Router != nil {
+		log.WithFields(logrus.Fields{
+			"ipv4":   ipv4GW,
+			"ipv6":   ipv6Router,
+			"device": devName,
+		}).Info("Restored router address from device")
+	}
+
+	return ipv4GW, ipv6Router
+}
+
+// initMasqueradeAddrs initializes BPF masquerade addresses for the given
+// devices.
+func initMasqueradeAddrs(masqAddrs map[string]net.IP, family int, masqIPFromDevice string, devices []string, logfield string) error {
+	if ifaceName := masqIPFromDevice; ifaceName != "" {
+		ip, err := firstGlobalAddr(ifaceName, nil, family, preferPublicIP)
+		if err != nil {
+			return fmt.Errorf("Failed to determine IP of %s for BPF masq", ifaceName)
+		}
+		for _, device := range devices {
+			masqAddrs[device] = ip
+		}
+		return nil
+	}
+
+	for _, device := range devices {
+		ip, err := firstGlobalAddr(device, GetK8sNodeIP(), family, preferPublicIP)
+		if err != nil {
+			return fmt.Errorf("Failed to determine IP of %s for BPF masq", device)
+		}
+
+		masqAddrs[device] = ip
+		log.WithFields(logrus.Fields{
+			logfield:         ip,
+			logfields.Device: device,
+		}).Info("Masquerading IP selected for device")
+	}
+
+	return nil
+}
+
+// initMasqueradeV4Addrs initializes BPF masquerade IPv4 addresses for the
+// given devices.
+func initMasqueradeV4Addrs(masqAddrs map[string]net.IP, masqIPFromDevice string, devices []string, logfield string) error {
+	return initMasqueradeAddrs(masqAddrs, netlink.FAMILY_V4, masqIPFromDevice, devices, logfield)
+}
+
+// initMasqueradeV6Addrs initializes BPF masquerade IPv6 addresses for the
+// given devices.
+func initMasqueradeV6Addrs(masqAddrs map[string]net.IP, masqIPFromDevice string, devices []string, logfield string) error {
+	return initMasqueradeAddrs(masqAddrs, netlink.FAMILY_V6, masqIPFromDevice, devices, logfield)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/node/address_other.go b/vendor/github.com/cilium/cilium/pkg/node/address_other.go
new file mode 100644
index 0000000000..af2ba9fae4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/address_other.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build !linux
+
+package node
+
+import "net"
+
+func firstGlobalAddr(intf string, preferredIP net.IP, family int, preferPublic bool) (net.IP, error) {
+	return net.IP{}, nil
+}
+
+func firstGlobalV4Addr(intf string, preferredIP net.IP, preferPublic bool) (net.IP, error) {
+	return net.IP{}, nil
+}
+
+func firstGlobalV6Addr(intf string, preferredIP net.IP, preferPublic bool) (net.IP, error) {
+	return net.IP{}, nil
+}
+
+func initMasqueradeV4Addrs(masqAddrs map[string]net.IP, masqIPFromDevice string, devices []string, logfield string) error {
+	return nil
+}
+
+func initMasqueradeV6Addrs(masqAddrs map[string]net.IP, masqIPFromDevice string, devices []string, logfield string) error {
+	return nil
+}
+
+// getCiliumHostIPsFromNetDev returns the IPv4 gateway and IPv6 router
+// addresses of the given device; this non-Linux stub returns empty IPs.
+func getCiliumHostIPsFromNetDev(devName string) (ipv4GW, ipv6Router net.IP) {
+	return net.IP{}, net.IP{}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/node/doc.go b/vendor/github.com/cilium/cilium/pkg/node/doc.go
new file mode 100644
index 0000000000..076044279a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package node provides functionality related to the local and remote node
+// addresses
+package node
diff --git a/vendor/github.com/cilium/cilium/pkg/node/host_endpoint.go b/vendor/github.com/cilium/cilium/pkg/node/host_endpoint.go
new file mode 100644
index 0000000000..4296484fe7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/host_endpoint.go
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package node
+
+const (
+	templateHostEndpointID = uint64(0xffff)
+)
+
+var (
+	endpointID = templateHostEndpointID
+)
+
+// GetEndpointID returns the ID of the host endpoint for this node.
+func GetEndpointID() uint64 {
+	return endpointID
+}
+
+// SetEndpointID sets the ID of the host endpoint for this node.
+func SetEndpointID(id uint64) { + endpointID = id +} diff --git a/vendor/github.com/cilium/cilium/pkg/node/ip.go b/vendor/github.com/cilium/cilium/pkg/node/ip.go new file mode 100644 index 0000000000..fe3a76e6ed --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/node/ip.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package node + +import "net" + +var excludedIPs []net.IP + +// GetExcludedIPs returns a list of IPs from netdevices that Cilium +// needs to exclude to operate +func GetExcludedIPs() []net.IP { + return excludedIPs +} diff --git a/vendor/github.com/cilium/cilium/pkg/node/ip_linux.go b/vendor/github.com/cilium/cilium/pkg/node/ip_linux.go new file mode 100644 index 0000000000..21a7176fc4 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/node/ip_linux.go @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package node + +import ( + "strings" + + "github.com/vishvananda/netlink" +) + +func init() { + initExcludedIPs() +} + +func initExcludedIPs() { + // We exclude below bad device prefixes from address selection ... + prefixes := []string{ + "docker", + } + links, err := netlink.LinkList() + if err != nil { + return + } + for _, l := range links { + // ... also all down devices since they won't be reachable. + // + // We need to check for both "up" and "unknown" state, as some + // drivers may not implement operstate handling, and just report + // their state as unknown even though they are operational. + if l.Attrs().OperState == netlink.OperUp || + l.Attrs().OperState == netlink.OperUnknown { + skip := true + for _, p := range prefixes { + if strings.HasPrefix(l.Attrs().Name, p) { + skip = false + break + } + } + if skip { + continue + } + } + addr, err := netlink.AddrList(l, netlink.FAMILY_ALL) + if err != nil { + continue + } + for _, a := range addr { + excludedIPs = append(excludedIPs, a.IP) + } + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/node/local_node_store.go b/vendor/github.com/cilium/cilium/pkg/node/local_node_store.go new file mode 100644 index 0000000000..0e8bfaf84c --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/node/local_node_store.go @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package node + +import ( + "context" + "sync" + + k8stypes "k8s.io/apimachinery/pkg/types" + + "github.com/cilium/cilium/pkg/hive/cell" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/node/types" + "github.com/cilium/cilium/pkg/stream" +) + +type LocalNode struct { + types.Node + // OptOutNodeEncryption will make the local node opt-out of node-to-node + // encryption + OptOutNodeEncryption bool + // Unique identifier of the Kubernetes node, used to construct the + // corresponding owner reference. + UID k8stypes.UID + // ID of the node assigned by the cloud provider. + ProviderID string +} + +// LocalNodeSynchronizer specifies how to build, and keep synchronized the local +// node object. +type LocalNodeSynchronizer interface { + InitLocalNode(context.Context, *LocalNode) error + SyncLocalNode(context.Context, *LocalNodeStore) +} + +// LocalNodeStoreCell provides the LocalNodeStore instance. +// The LocalNodeStore is the canonical owner of `types.Node` for the local node and +// provides a reactive API for observing and updating it. 
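+//
+// A minimal wiring sketch (hypothetical hive setup, for illustration only):
+//
+//	hive.New(
+//		node.LocalNodeStoreCell,
+//		cell.Invoke(func(s *node.LocalNodeStore) { /* observe or update */ }),
+//	)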
+var LocalNodeStoreCell = cell.Module( + "local-node-store", + "Provides LocalNodeStore for observing and updating local node info", + + cell.Provide(NewLocalNodeStore), +) + +// LocalNodeStoreParams are the inputs needed for constructing LocalNodeStore. +type LocalNodeStoreParams struct { + cell.In + + Lifecycle cell.Lifecycle + Sync LocalNodeSynchronizer `optional:"true"` +} + +// LocalNodeStore is the canonical owner for the local node object and provides +// a reactive API for observing and updating the state. +type LocalNodeStore struct { + // Changes to the local node are observable. + stream.Observable[LocalNode] + + mu lock.Mutex + value LocalNode + emit func(LocalNode) + complete func(error) +} + +func NewTestLocalNodeStore(mockNode LocalNode) *LocalNodeStore { + src, emit, complete := stream.Multicast[LocalNode](stream.EmitLatest) + emit(mockNode) + return &LocalNodeStore{ + Observable: src, + emit: emit, + complete: complete, + value: mockNode, + } +} + +func NewLocalNodeStore(params LocalNodeStoreParams) (*LocalNodeStore, error) { + src, emit, complete := stream.Multicast[LocalNode](stream.EmitLatest) + + s := &LocalNodeStore{ + Observable: src, + value: LocalNode{Node: types.Node{ + // Explicitly initialize the labels and annotations maps, so that + // we don't need to always check for nil values. + Labels: make(map[string]string), + Annotations: make(map[string]string), + }}, + } + + bctx, cancel := context.WithCancel(context.Background()) + var wg sync.WaitGroup + + params.Lifecycle.Append(cell.Hook{ + OnStart: func(ctx cell.HookContext) error { + s.mu.Lock() + defer s.mu.Unlock() + if params.Sync != nil { + if err := params.Sync.InitLocalNode(ctx, &s.value); err != nil { + return err + } + + // Start the synchronization process in background + wg.Add(1) + go func() { + params.Sync.SyncLocalNode(bctx, s) + wg.Done() + }() + } + + // Set the global variable still used by getters + // and setters in address.go. We're setting it in Start + // to catch uses of it before it's initialized. + localNode = s + + s.emit = emit + s.complete = complete + emit(s.value) + return nil + }, + OnStop: func(cell.HookContext) error { + // Stop the synchronization process (no-op if it had not been started) + cancel() + wg.Wait() + + s.mu.Lock() + s.complete(nil) + s.complete = nil + s.emit = nil + s.mu.Unlock() + + localNode = nil + return nil + }, + }) + + return s, nil +} + +// Get retrieves the current local node. Use Get() only for inspecting the state, +// e.g. in API handlers. Do not assume the value does not change over time. +// Blocks until the store has been initialized. +func (s *LocalNodeStore) Get(ctx context.Context) (LocalNode, error) { + // Subscribe to the stream of updates and take the first (latest) state. + return stream.First[LocalNode](ctx, s) +} + +// Update modifies the local node with a mutator. The updated value +// is passed to observers. 
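+//
+// For example (hypothetical field value, for illustration only):
+//
+//	s.Update(func(n *LocalNode) {
+//		n.ProviderID = "example:///i-0123"
+//	})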
+func (s *LocalNodeStore) Update(update func(*LocalNode)) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	update(&s.value)
+
+	if s.emit != nil {
+		s.emit(s.value)
+	}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/node/logfields.go b/vendor/github.com/cilium/cilium/pkg/node/logfields.go
new file mode 100644
index 0000000000..5b12bc9f1b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/logfields.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package node
+
+import (
+	"github.com/cilium/cilium/pkg/logging"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "node")
diff --git a/vendor/github.com/cilium/cilium/pkg/node/types/logfields.go b/vendor/github.com/cilium/cilium/pkg/node/types/logfields.go
new file mode 100644
index 0000000000..f992f91369
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/types/logfields.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+	"github.com/cilium/cilium/pkg/logging"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "node")
diff --git a/vendor/github.com/cilium/cilium/pkg/node/types/node.go b/vendor/github.com/cilium/cilium/pkg/node/types/node.go
new file mode 100644
index 0000000000..c76efae3f7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/types/node.go
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"path"
+	"slices"
+
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/cilium/cilium/api/v1/models"
+	"github.com/cilium/cilium/pkg/annotation"
+	"github.com/cilium/cilium/pkg/cidr"
+	cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
+	"github.com/cilium/cilium/pkg/defaults"
+	ipamTypes "github.com/cilium/cilium/pkg/ipam/types"
+	ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+	"github.com/cilium/cilium/pkg/kvstore/store"
+	"github.com/cilium/cilium/pkg/node/addressing"
+	"github.com/cilium/cilium/pkg/option"
+	"github.com/cilium/cilium/pkg/source"
+)
+
+// Identity represents the node identity of a node.
+type Identity struct {
+	Name    string
+	Cluster string
+}
+
+// String returns the string representation of NodeIdentity.
+func (nn Identity) String() string {
+	return path.Join(nn.Cluster, nn.Name)
+}
+
+// appendAllocCDIR sets or appends the given podCIDR to the node.
+// If the IPv4/IPv6AllocCIDR is already set, we add the podCIDR as a secondary
+// alloc CIDR.
+func (n *Node) appendAllocCDIR(podCIDR *cidr.CIDR) {
+	if podCIDR.IP.To4() != nil {
+		if n.IPv4AllocCIDR == nil {
+			n.IPv4AllocCIDR = podCIDR
+		} else {
+			n.IPv4SecondaryAllocCIDRs = append(n.IPv4SecondaryAllocCIDRs, podCIDR)
+		}
+	} else {
+		if n.IPv6AllocCIDR == nil {
+			n.IPv6AllocCIDR = podCIDR
+		} else {
+			n.IPv6SecondaryAllocCIDRs = append(n.IPv6SecondaryAllocCIDRs, podCIDR)
+		}
+	}
+}
+
+// ParseCiliumNode parses a CiliumNode custom resource and returns a Node
+// instance.
+// Invalid IPs and CIDRs are silently ignored.
+func ParseCiliumNode(n *ciliumv2.CiliumNode) (node Node) {
+	wireguardPubKey, _ := annotation.Get(n, annotation.WireguardPubKey, annotation.WireguardPubKeyAlias)
+	node = Node{
+		Name:            n.Name,
+		EncryptionKey:   uint8(n.Spec.Encryption.Key),
+		Cluster:         option.Config.ClusterName,
+		ClusterID:       option.Config.ClusterID,
+		Source:          source.CustomResource,
+		Labels:          n.ObjectMeta.Labels,
+		Annotations:     n.ObjectMeta.Annotations,
+		NodeIdentity:    uint32(n.Spec.NodeIdentity),
+		WireguardPubKey: wireguardPubKey,
+	}
+
+	for _, cidrString := range n.Spec.IPAM.PodCIDRs {
+		ipnet, err := cidr.ParseCIDR(cidrString)
+		if err == nil {
+			node.appendAllocCDIR(ipnet)
+		}
+	}
+
+	for _, pool := range n.Spec.IPAM.Pools.Allocated {
+		for _, podCIDR := range pool.CIDRs {
+			ipnet, err := cidr.ParseCIDR(string(podCIDR))
+			if err == nil {
+				node.appendAllocCDIR(ipnet)
+			}
+		}
+	}
+
+	node.IPv4HealthIP = net.ParseIP(n.Spec.HealthAddressing.IPv4)
+	node.IPv6HealthIP = net.ParseIP(n.Spec.HealthAddressing.IPv6)
+
+	node.IPv4IngressIP = net.ParseIP(n.Spec.IngressAddressing.IPV4)
+	node.IPv6IngressIP = net.ParseIP(n.Spec.IngressAddressing.IPV6)
+
+	for _, address := range n.Spec.Addresses {
+		if ip := net.ParseIP(address.IP); ip != nil {
+			node.IPAddresses = append(node.IPAddresses, Address{Type: address.Type, IP: ip})
+		}
+	}
+
+	return
+}
+
+// ToCiliumNode converts the node to a CiliumNode
+func (n *Node) ToCiliumNode() *ciliumv2.CiliumNode {
+	var (
+		podCIDRs                 []string
+		ipAddrs                  []ciliumv2.NodeAddress
+		healthIPv4, healthIPv6   string
+		ingressIPv4, ingressIPv6 string
+	)
+
+	if n.IPv4AllocCIDR != nil {
+		podCIDRs = append(podCIDRs, n.IPv4AllocCIDR.String())
+	}
+	if n.IPv6AllocCIDR != nil {
+		podCIDRs = append(podCIDRs, n.IPv6AllocCIDR.String())
+	}
+	for _, ipv4AllocCIDR := range n.IPv4SecondaryAllocCIDRs {
+		podCIDRs = append(podCIDRs, ipv4AllocCIDR.String())
+	}
+	for _, ipv6AllocCIDR := range n.IPv6SecondaryAllocCIDRs {
+		podCIDRs = append(podCIDRs, ipv6AllocCIDR.String())
+	}
+	if n.IPv4HealthIP != nil {
+		healthIPv4 = n.IPv4HealthIP.String()
+	}
+	if n.IPv6HealthIP != nil {
+		healthIPv6 = n.IPv6HealthIP.String()
+	}
+	if n.IPv4IngressIP != nil {
+		ingressIPv4 = n.IPv4IngressIP.String()
+	}
+	if n.IPv6IngressIP != nil {
+		ingressIPv6 = n.IPv6IngressIP.String()
+	}
+
+	for _, address := range n.IPAddresses {
+		ipAddrs = append(ipAddrs, ciliumv2.NodeAddress{
+			Type: address.Type,
+			IP:   address.IP.String(),
+		})
+	}
+
+	return &ciliumv2.CiliumNode{
+		ObjectMeta: v1.ObjectMeta{
+			Name:        n.Name,
+			Labels:      n.Labels,
+			Annotations: n.Annotations,
+		},
+		Spec: ciliumv2.NodeSpec{
+			Addresses: ipAddrs,
+			HealthAddressing: ciliumv2.HealthAddressingSpec{
+				IPv4: healthIPv4,
+				IPv6: healthIPv6,
+			},
+			IngressAddressing: ciliumv2.AddressPair{
+				IPV4: ingressIPv4,
+				IPV6: ingressIPv6,
+			},
+			Encryption: ciliumv2.EncryptionSpec{
+				Key: int(n.EncryptionKey),
+			},
+			IPAM: ipamTypes.IPAMSpec{
+				PodCIDRs: podCIDRs,
+			},
+			NodeIdentity: uint64(n.NodeIdentity),
+		},
+	}
+}
+
+// RegisterNode overloads GetKeyName to ignore the cluster name, as the cluster name may not be stable during node registration.
+//
+// +k8s:deepcopy-gen=true
+type RegisterNode struct {
+	Node
+}
+
+// GetKeyName returns the overloaded key name without the cluster name
+func (n *RegisterNode) GetKeyName() string {
+	return n.Name
+}
+
+// DeepKeyCopy creates a deep copy of the LocalKey
+func (n *RegisterNode) DeepKeyCopy() store.LocalKey {
+	return n.DeepCopy()
+}
+
+func (n *RegisterNode) Unmarshal(_ string, data []byte) error {
+	newNode := Node{}
+	if err := json.Unmarshal(data, &newNode); err != nil {
+		return err
+	}
+
+	n.Node = newNode
+	return nil
+}
+
+// Node contains the node name and the list of addresses assigned to the node
+//
+// +k8s:deepcopy-gen=true
+type Node struct {
+	// Name is the name of the node. This is typically the hostname of the node.
+	Name string
+
+	// Cluster is the name of the cluster the node is associated with
+	Cluster string
+
+	IPAddresses []Address
+
+	// IPv4AllocCIDR if set, is the IPv4 address pool from which the node
+	// allocates IPs for local endpoints
+	IPv4AllocCIDR *cidr.CIDR
+
+	// IPv4SecondaryAllocCIDRs contains additional IPv4 CIDRs from which this
+	// node allocates IPs for its local endpoints
+	IPv4SecondaryAllocCIDRs []*cidr.CIDR
+
+	// IPv6AllocCIDR if set, is the IPv6 address pool from which the node
+	// allocates IPs for local endpoints
+	IPv6AllocCIDR *cidr.CIDR
+
+	// IPv6SecondaryAllocCIDRs contains additional IPv6 CIDRs from which this
+	// node allocates IPs for its local endpoints
+	IPv6SecondaryAllocCIDRs []*cidr.CIDR
+
+	// IPv4HealthIP if not nil, this is the IPv4 address of the
+	// cilium-health endpoint located on the node.
+	IPv4HealthIP net.IP
+
+	// IPv6HealthIP if not nil, this is the IPv6 address of the
+	// cilium-health endpoint located on the node.
+	IPv6HealthIP net.IP
+
+	// IPv4IngressIP if not nil, this is the IPv4 address of the
+	// Ingress listener on the node.
+	IPv4IngressIP net.IP
+
+	// IPv6IngressIP if not nil, this is the IPv6 address of the
+	// Ingress listener located on the node.
+	IPv6IngressIP net.IP
+
+	// ClusterID is the unique identifier of the cluster
+	ClusterID uint32
+
+	// Source is the source where the node configuration was generated / created.
+	Source source.Source
+
+	// Key index used for transparent encryption or 0 for no encryption
+	EncryptionKey uint8
+
+	// Node labels
+	Labels map[string]string
+
+	// Node annotations
+	Annotations map[string]string
+
+	// NodeIdentity is the numeric identity allocated for the node
+	NodeIdentity uint32
+
+	// WireguardPubKey is the WireGuard public key of this node
+	WireguardPubKey string
+}
+
+// Fullname returns the node's full name including the cluster name if a
+// cluster name value other than the default value has been specified
+func (n *Node) Fullname() string {
+	if n.Cluster != defaults.ClusterName {
+		return path.Join(n.Cluster, n.Name)
+	}
+
+	return n.Name
+}
+
+// Address is a node address which contains an IP and the address type.
+//
+// +k8s:deepcopy-gen=true
+type Address struct {
+	Type addressing.AddressType
+	IP   net.IP
+}
+
+func (a Address) ToString() string {
+	return a.IP.String()
+}
+
+func (a Address) AddrType() addressing.AddressType {
+	return a.Type
+}
+
+// GetNodeIP returns one of the node's IP addresses available with the
+// following priority:
+// - NodeInternalIP
+// - NodeExternalIP
+// - other IP address type
+// Nil is returned if GetNodeIP fails to extract an IP from the Node based
+// on the provided address family.
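+//
+// For example (hypothetical address list, for illustration only):
+//
+//	n.IPAddresses = []Address{
+//		{Type: addressing.NodeExternalIP, IP: net.ParseIP("203.0.113.10")},
+//		{Type: addressing.NodeInternalIP, IP: net.ParseIP("192.0.2.10")},
+//	}
+//	ip := n.GetNodeIP(false) // 192.0.2.10: NodeInternalIP takes priority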
+func (n *Node) GetNodeIP(ipv6 bool) net.IP {
+	return addressing.ExtractNodeIP[Address](n.IPAddresses, ipv6)
+}
+
+// GetExternalIP returns the ExternalIP of the k8s Node. If not present, it
+// returns nil.
+func (n *Node) GetExternalIP(ipv6 bool) net.IP {
+	for _, addr := range n.IPAddresses {
+		if (ipv6 && addr.IP.To4() != nil) || (!ipv6 && addr.IP.To4() == nil) {
+			continue
+		}
+		if addr.Type == addressing.NodeExternalIP {
+			return addr.IP
+		}
+	}
+
+	return nil
+}
+
+// GetK8sNodeIP returns the k8s Node IP (either InternalIP or ExternalIP, or
+// nil; the former is preferred).
+func (n *Node) GetK8sNodeIP() net.IP {
+	var externalIP net.IP
+
+	for _, addr := range n.IPAddresses {
+		if addr.Type == addressing.NodeInternalIP {
+			return addr.IP
+		} else if addr.Type == addressing.NodeExternalIP {
+			externalIP = addr.IP
+		}
+	}
+
+	return externalIP
+}
+
+// GetNodeInternalIPv4 returns the internal IPv4 address of the node, or nil.
+func (n *Node) GetNodeInternalIPv4() net.IP {
+	for _, addr := range n.IPAddresses {
+		if addr.IP.To4() == nil {
+			continue
+		}
+		if addr.Type == addressing.NodeInternalIP {
+			return addr.IP
+		}
+	}
+
+	return nil
+}
+
+// GetNodeInternalIPv6 returns the internal IPv6 address of the node, or nil.
+func (n *Node) GetNodeInternalIPv6() net.IP {
+	for _, addr := range n.IPAddresses {
+		if addr.IP.To4() != nil {
+			continue
+		}
+		if addr.Type == addressing.NodeInternalIP {
+			return addr.IP
+		}
+	}
+
+	return nil
+}
+
+// GetCiliumInternalIP returns the CiliumInternalIP e.g. the IP associated
+// with cilium_host on the node.
+func (n *Node) GetCiliumInternalIP(ipv6 bool) net.IP {
+	for _, addr := range n.IPAddresses {
+		if (ipv6 && addr.IP.To4() != nil) ||
+			(!ipv6 && addr.IP.To4() == nil) {
+			continue
+		}
+		if addr.Type == addressing.NodeCiliumInternalIP {
+			return addr.IP
+		}
+	}
+	return nil
+}
+
+// SetCiliumInternalIP sets the CiliumInternalIP e.g. the IP associated
+// with cilium_host on the node.
+func (n *Node) SetCiliumInternalIP(newAddr net.IP) {
+	n.setAddress(addressing.NodeCiliumInternalIP, newAddr)
+}
+
+// SetNodeExternalIP sets the NodeExternalIP.
+func (n *Node) SetNodeExternalIP(newAddr net.IP) {
+	n.setAddress(addressing.NodeExternalIP, newAddr)
+}
+
+// SetNodeInternalIP sets the NodeInternalIP.
+func (n *Node) SetNodeInternalIP(newAddr net.IP) {
+	n.setAddress(addressing.NodeInternalIP, newAddr)
+}
+
+func (n *Node) RemoveAddresses(typ addressing.AddressType) {
+	newAddresses := []Address{}
+	for _, addr := range n.IPAddresses {
+		if addr.Type != typ {
+			newAddresses = append(newAddresses, addr)
+		}
+	}
+	n.IPAddresses = newAddresses
+}
+
+func (n *Node) setAddress(typ addressing.AddressType, newIP net.IP) {
+	newAddr := Address{Type: typ, IP: newIP}
+
+	if newIP == nil {
+		n.RemoveAddresses(typ)
+		return
+	}
+
+	// Create a copy of the slice, so that we don't modify the
+	// current one, which may be captured by any of the observers.
+	n.IPAddresses = slices.Clone(n.IPAddresses)
+
+	ipv6 := newIP.To4() == nil
+	// Try first to replace an existing address with same type
+	for i, addr := range n.IPAddresses {
+		if addr.Type != typ {
+			continue
+		}
+		if ipv6 != (addr.IP.To4() == nil) {
+			// Don't replace if address family is different.
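+			// An IPv4 and an IPv6 entry of the same address type may
+			// coexist; each family keeps (or gets) its own entry.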
+ continue + } + n.IPAddresses[i] = newAddr + return + } + n.IPAddresses = append(n.IPAddresses, newAddr) + +} + +func (n *Node) GetIPByType(addrType addressing.AddressType, ipv6 bool) net.IP { + for _, addr := range n.IPAddresses { + if addr.Type != addrType { + continue + } + if is4 := addr.IP.To4() != nil; (!ipv6 && is4) || (ipv6 && !is4) { + return addr.IP + } + } + return nil +} + +func (n *Node) getPrimaryAddress() *models.NodeAddressing { + v4 := n.GetNodeIP(false) + v6 := n.GetNodeIP(true) + + var ipv4AllocStr, ipv6AllocStr string + if n.IPv4AllocCIDR != nil { + ipv4AllocStr = n.IPv4AllocCIDR.String() + } + if n.IPv6AllocCIDR != nil { + ipv6AllocStr = n.IPv6AllocCIDR.String() + } + + var v4Str, v6Str string + if v4 != nil { + v4Str = v4.String() + } + if v6 != nil { + v6Str = v6.String() + } + + return &models.NodeAddressing{ + IPV4: &models.NodeAddressingElement{ + Enabled: option.Config.EnableIPv4, + IP: v4Str, + AllocRange: ipv4AllocStr, + }, + IPV6: &models.NodeAddressingElement{ + Enabled: option.Config.EnableIPv6, + IP: v6Str, + AllocRange: ipv6AllocStr, + }, + } +} + +func (n *Node) isPrimaryAddress(addr Address, ipv4 bool) bool { + return addr.IP.String() == n.GetNodeIP(!ipv4).String() +} + +func (n *Node) getSecondaryAddresses() []*models.NodeAddressingElement { + result := []*models.NodeAddressingElement{} + + for _, addr := range n.IPAddresses { + ipv4 := false + if addr.IP.To4() != nil { + ipv4 = true + } + if !n.isPrimaryAddress(addr, ipv4) { + result = append(result, &models.NodeAddressingElement{ + IP: addr.IP.String(), + }) + } + } + + return result +} + +func (n *Node) getHealthAddresses() *models.NodeAddressing { + if n.IPv4HealthIP == nil && n.IPv6HealthIP == nil { + return nil + } + + var v4Str, v6Str string + if n.IPv4HealthIP != nil { + v4Str = n.IPv4HealthIP.String() + } + if n.IPv6HealthIP != nil { + v6Str = n.IPv6HealthIP.String() + } + + return &models.NodeAddressing{ + IPV4: &models.NodeAddressingElement{ + Enabled: option.Config.EnableIPv4, + IP: v4Str, + }, + IPV6: &models.NodeAddressingElement{ + Enabled: option.Config.EnableIPv6, + IP: v6Str, + }, + } +} + +func (n *Node) getIngressAddresses() *models.NodeAddressing { + if n.IPv4IngressIP == nil && n.IPv6IngressIP == nil { + return nil + } + + var v4Str, v6Str string + if n.IPv4IngressIP != nil { + v4Str = n.IPv4IngressIP.String() + } + if n.IPv6IngressIP != nil { + v6Str = n.IPv6IngressIP.String() + } + + return &models.NodeAddressing{ + IPV4: &models.NodeAddressingElement{ + Enabled: option.Config.EnableIPv4, + IP: v4Str, + }, + IPV6: &models.NodeAddressingElement{ + Enabled: option.Config.EnableIPv6, + IP: v6Str, + }, + } +} + +// GetModel returns the API model representation of a node. 
+func (n *Node) GetModel() *models.NodeElement { + return &models.NodeElement{ + Name: n.Fullname(), + PrimaryAddress: n.getPrimaryAddress(), + SecondaryAddresses: n.getSecondaryAddresses(), + HealthEndpointAddress: n.getHealthAddresses(), + IngressAddress: n.getIngressAddresses(), + Source: string(n.Source), + } +} + +// Identity returns the identity of the node +func (n *Node) Identity() Identity { + return Identity{ + Name: n.Name, + Cluster: n.Cluster, + } +} + +func getCluster() string { + return option.Config.ClusterName +} + +// IsLocal returns true if this is the node on which the agent itself is +// running on +func (n *Node) IsLocal() bool { + return n != nil && n.Name == GetName() && n.Cluster == getCluster() +} + +func (n *Node) GetIPv4AllocCIDRs() []*cidr.CIDR { + result := make([]*cidr.CIDR, 0, len(n.IPv4SecondaryAllocCIDRs)+1) + if n.IPv4AllocCIDR != nil { + result = append(result, n.IPv4AllocCIDR) + } + if len(n.IPv4SecondaryAllocCIDRs) > 0 { + result = append(result, n.IPv4SecondaryAllocCIDRs...) + } + return result +} + +func (n *Node) GetIPv6AllocCIDRs() []*cidr.CIDR { + result := make([]*cidr.CIDR, 0, len(n.IPv6SecondaryAllocCIDRs)+1) + if n.IPv6AllocCIDR != nil { + result = append(result, n.IPv6AllocCIDR) + } + if len(n.IPv6SecondaryAllocCIDRs) > 0 { + result = append(result, n.IPv6SecondaryAllocCIDRs...) + } + return result +} + +// GetKeyNodeName constructs the API name for the given cluster and node name. +func GetKeyNodeName(cluster, node string) string { + // WARNING - STABLE API: Changing the structure of the key may break + // backwards compatibility + return path.Join(cluster, node) +} + +// GetKeyName returns the kvstore key to be used for the node +func (n *Node) GetKeyName() string { + return GetKeyNodeName(n.Cluster, n.Name) +} + +// DeepKeyCopy creates a deep copy of the LocalKey +func (n *Node) DeepKeyCopy() store.LocalKey { + return n.DeepCopy() +} + +// Marshal returns the node object as JSON byte slice +func (n *Node) Marshal() ([]byte, error) { + return json.Marshal(n) +} + +// Unmarshal parses the JSON byte slice and updates the node receiver +func (n *Node) Unmarshal(_ string, data []byte) error { + newNode := Node{} + if err := json.Unmarshal(data, &newNode); err != nil { + return err + } + + if err := newNode.validate(); err != nil { + return err + } + + *n = newNode + + return nil +} + +// LogRepr returns a representation of the node to be used for logging +func (n *Node) LogRepr() string { + b, err := n.Marshal() + if err != nil { + return fmt.Sprintf("%#v", n) + } + return string(b) +} + +func (n *Node) validate() error { + // Skip the ClusterID check if it matches the local one, as we assume that + // it has already been validated, and to allow it to be zero. 
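+	// Any other ClusterID (e.g. one learned from a remote cluster) must pass
+	// the cmtypes.ValidateClusterID sanity checks below.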
+ if n.ClusterID != option.Config.ClusterID { + if err := cmtypes.ValidateClusterID(n.ClusterID); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/node/types/nodename.go b/vendor/github.com/cilium/cilium/pkg/node/types/nodename.go new file mode 100644 index 0000000000..8faa30c3ed --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/node/types/nodename.go @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import ( + "os" + + "github.com/cilium/cilium/pkg/defaults" + k8sConsts "github.com/cilium/cilium/pkg/k8s/constants" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/option" +) + +var ( + nodeName = "localhost" +) + +// SetName sets the name of the local node. This will overwrite the value that +// is automatically retrieved with `os.Hostname()`. +// +// Note: This function is currently designed to only be called during the +// bootstrapping procedure of the agent where no parallelism exists. If you +// want to use this function in later stages, a mutex must be added first. +func SetName(name string) { + nodeName = name +} + +// GetName returns the name of the local node. The value returned was either +// previously set with SetName(), retrieved via `os.Hostname()`, or as a last +// resort is hardcoded to "localhost". +func GetName() string { + return nodeName +} + +// GetAbsoluteNodeName returns the absolute node name combined of both +// (prefixed)cluster name and the local node name in case of +// clustered environments otherwise returns the name of the local node. +func GetAbsoluteNodeName() string { + if clusterName := GetClusterName(); clusterName != "" { + return clusterName + "/" + nodeName + } else { + return nodeName + } +} + +func GetClusterName() string { + if option.Config.ClusterName != "" && + option.Config.ClusterName != defaults.ClusterName { + return option.Config.ClusterName + } else { + return "" + } +} + +func init() { + // Give priority to the environment variable available in the Cilium agent + if name := os.Getenv(k8sConsts.EnvNodeNameSpec); name != "" { + nodeName = name + return + } + if h, err := os.Hostname(); err != nil { + log.WithError(err).Warn("Unable to retrieve local hostname") + } else { + log.WithField(logfields.NodeName, h).Debug("os.Hostname() returned") + nodeName = h + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/node/types/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/node/types/zz_generated.deepcopy.go new file mode 100644 index 0000000000..6f78dd8c58 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/node/types/zz_generated.deepcopy.go @@ -0,0 +1,138 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package types + +import ( + net "net" + + cidr "github.com/cilium/cilium/pkg/cidr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Address) DeepCopyInto(out *Address) { + *out = *in + if in.IP != nil { + in, out := &in.IP, &out.IP + *out = make(net.IP, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Address. 
+func (in *Address) DeepCopy() *Address { + if in == nil { + return nil + } + out := new(Address) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]Address, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPv4AllocCIDR != nil { + in, out := &in.IPv4AllocCIDR, &out.IPv4AllocCIDR + *out = (*in).DeepCopy() + } + if in.IPv4SecondaryAllocCIDRs != nil { + in, out := &in.IPv4SecondaryAllocCIDRs, &out.IPv4SecondaryAllocCIDRs + *out = make([]*cidr.CIDR, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = (*in).DeepCopy() + } + } + } + if in.IPv6AllocCIDR != nil { + in, out := &in.IPv6AllocCIDR, &out.IPv6AllocCIDR + *out = (*in).DeepCopy() + } + if in.IPv6SecondaryAllocCIDRs != nil { + in, out := &in.IPv6SecondaryAllocCIDRs, &out.IPv6SecondaryAllocCIDRs + *out = make([]*cidr.CIDR, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = (*in).DeepCopy() + } + } + } + if in.IPv4HealthIP != nil { + in, out := &in.IPv4HealthIP, &out.IPv4HealthIP + *out = make(net.IP, len(*in)) + copy(*out, *in) + } + if in.IPv6HealthIP != nil { + in, out := &in.IPv6HealthIP, &out.IPv6HealthIP + *out = make(net.IP, len(*in)) + copy(*out, *in) + } + if in.IPv4IngressIP != nil { + in, out := &in.IPv4IngressIP, &out.IPv4IngressIP + *out = make(net.IP, len(*in)) + copy(*out, *in) + } + if in.IPv6IngressIP != nil { + in, out := &in.IPv6IngressIP, &out.IPv6IngressIP + *out = make(net.IP, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegisterNode) DeepCopyInto(out *RegisterNode) { + *out = *in + in.Node.DeepCopyInto(&out.Node) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegisterNode. +func (in *RegisterNode) DeepCopy() *RegisterNode { + if in == nil { + return nil + } + out := new(RegisterNode) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/cidr.go b/vendor/github.com/cilium/cilium/pkg/policy/cidr.go new file mode 100644 index 0000000000..33f14a9cd9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/policy/cidr.go @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package policy + +import ( + "net/netip" + + "github.com/cilium/cilium/pkg/ip" + "github.com/cilium/cilium/pkg/policy/api" +) + +// getPrefixesFromCIDR fetches all CIDRs referred to by the specified slice +// and returns them as regular golang CIDR objects. 
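+// The prefixes are returned as netip.Prefix values; entries that fail to
+// parse are silently dropped, since the second and third return values of
+// the ip.ParsePrefixes call below are discarded.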
+func getPrefixesFromCIDR(cidrs api.CIDRSlice) []netip.Prefix { + result, _, _ := ip.ParsePrefixes(cidrs.StringSlice()) + return result +} + +// GetPrefixesFromCIDRSet fetches all CIDRs referred to by the specified slice +// and returns them as regular golang CIDR objects. +// +// Assumes that validation already occurred on 'rules'. +func GetPrefixesFromCIDRSet(rules api.CIDRRuleSlice) []netip.Prefix { + cidrs := api.ComputeResultantCIDRSet(rules) + return getPrefixesFromCIDR(cidrs) +} + +// GetCIDRPrefixes runs through the specified 'rules' to find every reference +// to a CIDR in the rules, and returns a slice containing all of these CIDRs. +// Multiple rules referring to the same CIDR will result in multiple copies of +// the CIDR in the returned slice. +// +// Assumes that validation already occurred on 'rules'. +func GetCIDRPrefixes(rules api.Rules) []netip.Prefix { + if len(rules) == 0 { + return nil + } + res := make([]netip.Prefix, 0, 32) + for _, r := range rules { + for _, ir := range r.Ingress { + if len(ir.FromCIDR) > 0 { + res = append(res, getPrefixesFromCIDR(ir.FromCIDR)...) + } + if len(ir.FromCIDRSet) > 0 { + res = append(res, GetPrefixesFromCIDRSet(ir.FromCIDRSet)...) + } + } + for _, ir := range r.IngressDeny { + if len(ir.FromCIDR) > 0 { + res = append(res, getPrefixesFromCIDR(ir.FromCIDR)...) + } + if len(ir.FromCIDRSet) > 0 { + res = append(res, GetPrefixesFromCIDRSet(ir.FromCIDRSet)...) + } + } + for _, er := range r.Egress { + if len(er.ToCIDR) > 0 { + res = append(res, getPrefixesFromCIDR(er.ToCIDR)...) + } + if len(er.ToCIDRSet) > 0 { + res = append(res, GetPrefixesFromCIDRSet(er.ToCIDRSet)...) + } + } + for _, er := range r.EgressDeny { + if len(er.ToCIDR) > 0 { + res = append(res, getPrefixesFromCIDR(er.ToCIDR)...) + } + if len(er.ToCIDRSet) > 0 { + res = append(res, GetPrefixesFromCIDRSet(er.ToCIDRSet)...) + } + } + } + return res +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/config.go b/vendor/github.com/cilium/cilium/pkg/policy/config.go new file mode 100644 index 0000000000..ed63c47478 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/policy/config.go @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package policy + +import ( + ipcacheTypes "github.com/cilium/cilium/pkg/ipcache/types" + "github.com/cilium/cilium/pkg/labels" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/source" + "github.com/cilium/cilium/pkg/time" +) + +var ( + log = logging.DefaultLogger.WithField(logfields.LogSubsys, "policy") + mutex lock.RWMutex // Protects enablePolicy + enablePolicy string // Whether policy enforcement is enabled. +) + +// SetPolicyEnabled sets the policy enablement configuration. 
+// Valid values are:
+// - endpoint.AlwaysEnforce
+// - endpoint.NeverEnforce
+// - endpoint.DefaultEnforcement
+func SetPolicyEnabled(val string) {
+	mutex.Lock()
+	enablePolicy = val
+	mutex.Unlock()
+}
+
+// GetPolicyEnabled returns the policy enablement configuration
+func GetPolicyEnabled() string {
+	mutex.RLock()
+	val := enablePolicy
+	mutex.RUnlock()
+	return val
+}
+
+// AddOptions are options which can be passed to PolicyAdd
+type AddOptions struct {
+	// Replace if true indicates that existing rules with identical labels should be replaced
+	Replace bool
+	// ReplaceWithLabels if present indicates that existing rules with the
+	// given LabelArray should be deleted.
+	ReplaceWithLabels labels.LabelArray
+	// Generated should be set to true to signal that the policy being inserted
+	// was generated by cilium-agent, e.g. the DNS poller.
+	Generated bool
+
+	// The source of this policy, one of api, fqdn or k8s
+	Source source.Source
+
+	// The time the policy initially began to be processed in Cilium, such as when the
+	// policy was received from the API server.
+	ProcessingStartTime time.Time
+
+	// Resource provides the object ID for the underlying object that backs
+	// this information from 'source'.
+	Resource ipcacheTypes.ResourceID
+}
+
+// DeleteOptions are options which can be passed to PolicyDelete
+type DeleteOptions struct {
+	// The source of this policy, one of api, fqdn or k8s
+	Source source.Source
+
+	// Resource provides the object ID for the underlying object that backs
+	// this information from 'source'.
+	Resource ipcacheTypes.ResourceID
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/distillery.go b/vendor/github.com/cilium/cilium/pkg/policy/distillery.go
new file mode 100644
index 0000000000..2889ebfc18
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/distillery.go
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	identityPkg "github.com/cilium/cilium/pkg/identity"
+	"github.com/cilium/cilium/pkg/identity/identitymanager"
+	"github.com/cilium/cilium/pkg/lock"
+)
+
+// SelectorPolicy represents a cached selectorPolicy, previously resolved from
+// the policy repository and ready to be distilled against a set of identities
+// to compute datapath-level policy configuration.
+type SelectorPolicy interface {
+	// Consume returns the policy in terms of connectivity to peer
+	// Identities.
+	Consume(owner PolicyOwner) *EndpointPolicy
+}
+
+// PolicyCache represents a cache of resolved policies for identities.
+type PolicyCache struct {
+	lock.Mutex
+
+	// repo is a circular reference back to the Repository, but as
+	// we create only one Repository and one PolicyCache for each
+	// Cilium Agent process, these will never need to be garbage
+	// collected.
+	repo     *Repository
+	policies map[identityPkg.NumericIdentity]*cachedSelectorPolicy
+}
+
+// NewPolicyCache creates a new cache of SelectorPolicy.
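+//
+// A minimal construction sketch (the repository value is hypothetical, for
+// illustration only):
+//
+//	cache := NewPolicyCache(repo, true) // true: subscribe to identity events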
+func NewPolicyCache(repo *Repository, subscribe bool) *PolicyCache { + cache := &PolicyCache{ + repo: repo, + policies: make(map[identityPkg.NumericIdentity]*cachedSelectorPolicy), + } + if subscribe { + identitymanager.Subscribe(cache) + } + return cache +} + +func (cache *PolicyCache) GetSelectorCache() *SelectorCache { + return cache.repo.GetSelectorCache() +} + +// lookupOrCreate adds the specified Identity to the policy cache, with a reference +// from the specified Endpoint, then returns the threadsafe copy of the policy. +func (cache *PolicyCache) lookupOrCreate(identity *identityPkg.Identity, create bool) SelectorPolicy { + cache.Lock() + defer cache.Unlock() + cip, ok := cache.policies[identity.ID] + if !ok { + if !create { + return nil + } + cip = newCachedSelectorPolicy(identity, cache.repo.GetSelectorCache()) + cache.policies[identity.ID] = cip + } + return cip +} + +// insert adds the specified Identity to the policy cache, with a reference +// from the specified Endpoint, then returns the threadsafe copy of the policy. +func (cache *PolicyCache) insert(identity *identityPkg.Identity) SelectorPolicy { + return cache.lookupOrCreate(identity, true) +} + +// delete forgets about any cached SelectorPolicy that this endpoint uses. +// +// Returns true if the SelectorPolicy was removed from the cache. +func (cache *PolicyCache) delete(identity *identityPkg.Identity) bool { + cache.Lock() + defer cache.Unlock() + cip, ok := cache.policies[identity.ID] + if ok { + delete(cache.policies, identity.ID) + cip.getPolicy().Detach() + } + return ok +} + +// updateSelectorPolicy resolves the policy for the security identity of the +// specified endpoint and stores it internally. It will skip policy resolution +// if the cached policy is already at the revision specified in the repo. +// +// Returns whether the cache was updated, or an error. +// +// Must be called with repo.Mutex held for reading. +func (cache *PolicyCache) updateSelectorPolicy(identity *identityPkg.Identity) (bool, error) { + cache.Lock() + cip, ok := cache.policies[identity.ID] + cache.Unlock() + if !ok { + return false, fmt.Errorf("SelectorPolicy not found in cache for ID %d", identity.ID) + } + + // As long as UpdatePolicy() is triggered from endpoint + // regeneration, it's possible for two endpoints with the + // *same* identity to race to update the policy here. Such + // racing would lead to first of the endpoints using a + // selectorPolicy that is already detached from the selector + // cache, and thus not getting any incremental updates. + // + // Lock the 'cip' for the duration of the revision check and + // the possible policy update. + cip.Lock() + defer cip.Unlock() + + // Don't resolve policy if it was already done for this or later revision. + if cip.getPolicy().Revision >= cache.repo.GetRevision() { + return false, nil + } + + // Resolve the policies, which could fail + selPolicy, err := cache.repo.resolvePolicyLocked(identity) + if err != nil { + return false, err + } + + cip.setPolicy(selPolicy) + + return true, nil +} + +// LocalEndpointIdentityAdded creates a SelectorPolicy cache entry for the +// specified Identity, without calculating any policy for it. +func (cache *PolicyCache) LocalEndpointIdentityAdded(identity *identityPkg.Identity) { + cache.insert(identity) +} + +// LocalEndpointIdentityRemoved deletes the cached SelectorPolicy for the +// specified Identity. 
+func (cache *PolicyCache) LocalEndpointIdentityRemoved(identity *identityPkg.Identity) { + cache.delete(identity) +} + +// Lookup attempts to locate the SelectorPolicy corresponding to the specified +// identity. If policy is not cached for the identity, it returns nil. +func (cache *PolicyCache) Lookup(identity *identityPkg.Identity) SelectorPolicy { + return cache.lookupOrCreate(identity, false) +} + +// UpdatePolicy resolves the policy for the security identity of the specified +// endpoint and caches it for future use. +// +// The caller must provide threadsafety for iteration over the policy +// repository. +func (cache *PolicyCache) UpdatePolicy(identity *identityPkg.Identity) error { + _, err := cache.updateSelectorPolicy(identity) + return err +} + +// GetAuthTypes returns the AuthTypes required by the policy between the localID and remoteID, if +// any, otherwise returns nil. +func (cache *PolicyCache) GetAuthTypes(localID, remoteID identityPkg.NumericIdentity) AuthTypes { + cache.Lock() + cip, ok := cache.policies[localID] + cache.Unlock() + if !ok { + return nil // No policy for localID (no endpoint with localID) + } + + // SelectorPolicy is const after it has been created, so no locking needed to access it + selPolicy := cip.getPolicy() + + var resTypes AuthTypes + for cs, authTypes := range selPolicy.L4Policy.AuthMap { + missing := false + for authType := range authTypes { + if _, exists := resTypes[authType]; !exists { + missing = true + break + } + } + // Only check if 'cs' selects 'remoteID' if one of the authTypes is still missing + // from the result + if missing && cs.Selects(remoteID) { + if resTypes == nil { + resTypes = make(AuthTypes, 1) + } + for authType := range authTypes { + resTypes[authType] = struct{}{} + } + } + } + return resTypes +} + +// cachedSelectorPolicy is a wrapper around a selectorPolicy (stored in the +// 'policy' field). It is always nested directly in the owning policyCache, +// and is protected against concurrent writes via the policyCache mutex. +type cachedSelectorPolicy struct { + lock.Mutex // lock is needed to synchronize parallel policy updates + + identity *identityPkg.Identity + policy atomic.Pointer[selectorPolicy] +} + +func newCachedSelectorPolicy(identity *identityPkg.Identity, selectorCache *SelectorCache) *cachedSelectorPolicy { + cip := &cachedSelectorPolicy{ + identity: identity, + } + cip.setPolicy(newSelectorPolicy(selectorCache)) + return cip +} + +// getPolicy returns a reference to the selectorPolicy that is cached. +// +// Users should treat the result as immutable state that MUST NOT be modified. +func (cip *cachedSelectorPolicy) getPolicy() *selectorPolicy { + return cip.policy.Load() +} + +// setPolicy updates the reference to the SelectorPolicy that is cached. +// Calls Detach() on the old policy, if any. +func (cip *cachedSelectorPolicy) setPolicy(policy *selectorPolicy) { + oldPolicy := cip.policy.Swap(policy) + if oldPolicy != nil { + // Release the references the previous policy holds on the selector cache. + oldPolicy.Detach() + } +} + +// Consume returns the EndpointPolicy that defines connectivity policy to +// Identities in the specified cache. +// +// This denotes that a particular endpoint is 'consuming' the policy from the +// selector policy cache. 
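+//
+// A minimal usage sketch (hypothetical identity and PolicyOwner values, for
+// illustration only):
+//
+//	selPolicy := cache.insert(id)        // SelectorPolicy for the identity
+//	epPolicy := selPolicy.Consume(owner) // owner implements PolicyOwner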
+func (cip *cachedSelectorPolicy) Consume(owner PolicyOwner) *EndpointPolicy { + // TODO: This currently computes the EndpointPolicy from SelectorPolicy + // on-demand, however in future the cip is intended to cache the + // EndpointPolicy for this Identity and emit datapath deltas instead. + isHost := cip.identity.ID == identityPkg.ReservedIdentityHost + return cip.getPolicy().DistillPolicy(owner, isHost) +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/identifier.go b/vendor/github.com/cilium/cilium/pkg/policy/identifier.go new file mode 100644 index 0000000000..6479c34b3c --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/policy/identifier.go @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package policy + +import ( + "sync" + + "github.com/cilium/cilium/pkg/identity" + "github.com/cilium/cilium/pkg/lock" +) + +// Endpoint refers to any structure which has the following properties: +// * a node-local ID stored as a uint16 +// * a security identity +// * a means of incrementing its policy revision +// * a means of checking if it represents a node or a pod. +// * a set of labels +// * a kubernetes namespace +type Endpoint interface { + GetID16() uint16 + GetSecurityIdentity() (*identity.Identity, error) + PolicyRevisionBumpEvent(rev uint64) + IsHost() bool + GetOpLabels() []string + GetK8sNamespace() string +} + +// EndpointSet is used to be able to group together a given set of Endpoints +// that need to have a specific operation performed upon them (e.g., policy +// revision updates). +type EndpointSet struct { + mutex lock.RWMutex + endpoints map[Endpoint]struct{} +} + +// NewEndpointSet returns an EndpointSet with the given Endpoints map +func NewEndpointSet(m map[Endpoint]struct{}) *EndpointSet { + if m != nil { + return &EndpointSet{ + endpoints: m, + } + } + return &EndpointSet{ + endpoints: map[Endpoint]struct{}{}, + } +} + +// ForEachGo runs epFunc asynchronously inside a goroutine for each endpoint in +// the EndpointSet. It signals to the provided WaitGroup when epFunc has been +// executed for each endpoint. +func (e *EndpointSet) ForEachGo(wg *sync.WaitGroup, epFunc func(epp Endpoint)) { + e.mutex.RLock() + defer e.mutex.RUnlock() + + wg.Add(len(e.endpoints)) + + for ep := range e.endpoints { + go func(eppp Endpoint) { + epFunc(eppp) + wg.Done() + }(ep) + } +} + +// Delete removes ep from the EndpointSet. +func (e *EndpointSet) Delete(ep Endpoint) { + e.mutex.Lock() + delete(e.endpoints, ep) + e.mutex.Unlock() +} + +// Insert adds ep to the EndpointSet. +func (e *EndpointSet) Insert(ep Endpoint) { + e.mutex.Lock() + e.endpoints[ep] = struct{}{} + e.mutex.Unlock() +} + +// Len returns the number of elements in the EndpointSet. 
+func (e *EndpointSet) Len() (nElem int) {
+	e.mutex.RLock()
+	nElem = len(e.endpoints)
+	e.mutex.RUnlock()
+	return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/l4.go b/vendor/github.com/cilium/cilium/pkg/policy/l4.go
new file mode 100644
index 0000000000..06477c7c1a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/l4.go
@@ -0,0 +1,1404 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strconv"
+	"sync/atomic"
+
+	cilium "github.com/cilium/proxy/go/cilium/api"
+	"github.com/sirupsen/logrus"
+
+	"github.com/cilium/cilium/api/v1/models"
+	"github.com/cilium/cilium/pkg/iana"
+	"github.com/cilium/cilium/pkg/identity"
+	"github.com/cilium/cilium/pkg/labels"
+	"github.com/cilium/cilium/pkg/lock"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	"github.com/cilium/cilium/pkg/option"
+	"github.com/cilium/cilium/pkg/policy/api"
+	"github.com/cilium/cilium/pkg/policy/trafficdirection"
+	"github.com/cilium/cilium/pkg/u8proto"
+)
+
+// covers returns true if 'l4rule' has the effect needed for the 'l3l4rule', when 'l4rule' is added
+// to the datapath, due to the l4-only rule matching if l3l4-rule is not present. This determination
+// can be done here only when both rules have the same port number (or both have a wildcarded port).
+func (l4rule *PerSelectorPolicy) covers(l3l4rule *PerSelectorPolicy) bool {
+	// Deny takes highest precedence so it is dealt with first
+	if l4rule != nil && l4rule.IsDeny {
+		// l4-only deny takes precedence
+		return true
+	} else if l3l4rule != nil && l3l4rule.IsDeny {
+		// Must not skip if l3l4 rule is deny while l4-only rule is not
+		return false
+	}
+
+	// Can not skip if currentRule has an explicit auth type and wildcardRule does not or if
+	// both have different auth types. In all other cases the auth type from the wildcardRule
+	// can be used also for the current rule.
+	// Note that the caller must deal with inheriting redirect from wildcardRule to currentRule,
+	// if any.
+	cHasAuth, cAuthType := l3l4rule.GetAuthType()
+	wHasAuth, wAuthType := l4rule.GetAuthType()
+	if cHasAuth && !wHasAuth || cHasAuth && wHasAuth && cAuthType != wAuthType {
+		return false
+	}
+
+	l3l4IsRedirect := l3l4rule.IsRedirect()
+	l4OnlyIsRedirect := l4rule.IsRedirect()
+	if l3l4IsRedirect && !l4OnlyIsRedirect {
+		// Can not skip if l3l4-rule is redirect while l4-only is not
+		return false
+	}
+
+	// else can skip
+	return true
+}
+
+// TLS context holds the secret values resolved from an 'api.TLSContext'
+type TLSContext struct {
+	TrustedCA        string `json:"trustedCA,omitempty"`
+	CertificateChain string `json:"certificateChain,omitempty"`
+	PrivateKey       string `json:"privateKey,omitempty"`
+}
+
+// Equal returns true if 'a' and 'b' have the same contents.
+func (a *TLSContext) Equal(b *TLSContext) bool {
+	return a == nil && b == nil || a != nil && b != nil && *a == *b
+}
+
+// MarshalJSON marshals a redacted version of the TLSContext. We want
+// to see which fields are present, but not reveal their values in any
+// logs, etc.
+func (t *TLSContext) MarshalJSON() ([]byte, error) { + type tlsContext TLSContext + var redacted tlsContext + if t.TrustedCA != "" { + redacted.TrustedCA = "[redacted]" + } + if t.CertificateChain != "" { + redacted.CertificateChain = "[redacted]" + } + if t.PrivateKey != "" { + redacted.PrivateKey = "[redacted]" + } + return json.Marshal(&redacted) +} + +type StringSet map[string]struct{} + +func (a StringSet) Equal(b StringSet) bool { + if len(a) != len(b) { + return false + } + for k := range a { + if _, exists := b[k]; !exists { + return false + } + } + return true +} + +// NewStringSet returns a StringSet initialized from slice of strings. +// Returns nil for an empty slice +func NewStringSet(from []string) StringSet { + if len(from) == 0 { + return nil + } + set := make(StringSet, len(from)) + for _, s := range from { + set[s] = struct{}{} + } + return set +} + +// Merge returns StringSet with strings from both a and b. +// Returns a or b, possibly with modifications. +func (a StringSet) Merge(b StringSet) StringSet { + if len(a) == 0 { + return b + } + for s := range b { + a[s] = struct{}{} + } + return a +} + +// PerSelectorPolicy contains policy rules for a CachedSelector, i.e. for a +// selection of numerical identities. +type PerSelectorPolicy struct { + // TerminatingTLS is the TLS context for the connection terminated by + // the L7 proxy. For egress policy this specifies the server-side TLS + // parameters to be applied on the connections originated from the local + // POD and terminated by the L7 proxy. For ingress policy this specifies + // the server-side TLS parameters to be applied on the connections + // originated from a remote source and terminated by the L7 proxy. + TerminatingTLS *TLSContext `json:"terminatingTLS,omitempty"` + + // OriginatingTLS is the TLS context for the connections originated by + // the L7 proxy. For egress policy this specifies the client-side TLS + // parameters for the upstream connection originating from the L7 proxy + // to the remote destination. For ingress policy this specifies the + // client-side TLS parameters for the connection from the L7 proxy to + // the local POD. + OriginatingTLS *TLSContext `json:"originatingTLS,omitempty"` + + // ServerNames is a list of allowed TLS SNI values. If not empty, then + // TLS must be present and one of the provided SNIs must be indicated in the + // TLS handshake. + ServerNames StringSet `json:"serverNames,omitempty"` + + // isRedirect is 'true' when traffic must be redirected + isRedirect bool `json:"-"` + + // Pre-computed HTTP rules, computed after rule merging is complete + EnvoyHTTPRules *cilium.HttpNetworkPolicyRules `json:"-"` + + // CanShortCircuit is true if all 'EnvoyHTTPRules' may be + // short-circuited by other matches. + CanShortCircuit bool `json:"-"` + + api.L7Rules + + // Authentication is the kind of cryptographic authentication required for the traffic to be allowed + // at L3, if any. 
+	Authentication *api.Authentication `json:"auth,omitempty"`
+
+	// IsDeny is set if traffic matching this PerSelectorPolicy should be denied
+	IsDeny bool `json:",omitempty"`
+}
+
+// Equal returns true if 'a' and 'b' represent the same L7 Rules
+func (a *PerSelectorPolicy) Equal(b *PerSelectorPolicy) bool {
+	return a == nil && b == nil || a != nil && b != nil &&
+		a.TerminatingTLS.Equal(b.TerminatingTLS) &&
+		a.OriginatingTLS.Equal(b.OriginatingTLS) &&
+		a.ServerNames.Equal(b.ServerNames) &&
+		a.isRedirect == b.isRedirect &&
+		(a.Authentication == nil && b.Authentication == nil || a.Authentication != nil && a.Authentication.DeepEqual(b.Authentication)) &&
+		a.IsDeny == b.IsDeny &&
+		a.L7Rules.DeepEqual(&b.L7Rules)
+}
+
+// AuthType enumerates the supported authentication types in api.
+// Numerically higher type takes precedence in case of conflicting auth types.
+type AuthType uint8
+
+// AuthTypes is a set of AuthTypes, usually nil if empty
+type AuthTypes map[AuthType]struct{}
+
+// AuthMap maps remote selectors to their needed AuthTypes, if any
+type AuthMap map[CachedSelector]AuthTypes
+
+const (
+	// AuthTypeDisabled means no authentication required
+	AuthTypeDisabled AuthType = iota
+	// AuthTypeSpire is a mutual auth type that uses SPIFFE identities with a SPIRE server
+	AuthTypeSpire
+	// AuthTypeAlwaysFail is a simple auth type that always denies the request
+	AuthTypeAlwaysFail
+)
+
+type HasAuthType bool
+
+const (
+	DefaultAuthType  HasAuthType = false
+	ExplicitAuthType HasAuthType = true
+)
+
+// GetAuthType returns the AuthType of the PerSelectorPolicy.
+func (a *PerSelectorPolicy) GetAuthType() (HasAuthType, AuthType) {
+	if a == nil {
+		return DefaultAuthType, AuthTypeDisabled
+	}
+	return GetAuthType(a.Authentication)
+}
+
+// GetAuthType returns boolean HasAuthType and AuthType for the api.Authentication
+// If there is no explicit auth type, (DefaultAuthType, AuthTypeDisabled) is returned
+func GetAuthType(auth *api.Authentication) (HasAuthType, AuthType) {
+	if auth == nil {
+		return DefaultAuthType, AuthTypeDisabled
+	}
+	switch auth.Mode {
+	case api.AuthenticationModeDisabled:
+		return ExplicitAuthType, AuthTypeDisabled
+	case api.AuthenticationModeRequired:
+		return ExplicitAuthType, AuthTypeSpire
+	case api.AuthenticationModeAlwaysFail:
+		return ExplicitAuthType, AuthTypeAlwaysFail
+	default:
+		return DefaultAuthType, AuthTypeDisabled
+	}
+}
+
+// Uint8 returns AuthType as a uint8
+func (a AuthType) Uint8() uint8 {
+	return uint8(a)
+}
+
+// String returns AuthType as a string
+// This must return the strings accepted for api.AuthType
+func (a AuthType) String() string {
+	switch a {
+	case AuthTypeDisabled:
+		return "disabled"
+	case AuthTypeSpire:
+		return "spire"
+	case AuthTypeAlwaysFail:
+		return "test-always-fail"
+	}
+	return "Unknown-auth-type-" + strconv.FormatUint(uint64(a.Uint8()), 10)
+}
+
+// IsRedirect returns true if the L7Rules are a redirect.
+func (a *PerSelectorPolicy) IsRedirect() bool {
+	return a != nil && a.isRedirect
+}
+
+// HasL7Rules returns whether the `L7Rules` contains any L7 rules.
+func (a *PerSelectorPolicy) HasL7Rules() bool { + return !a.L7Rules.IsEmpty() +} + +// L7DataMap contains a map of L7 rules per endpoint where key is a CachedSelector +type L7DataMap map[CachedSelector]*PerSelectorPolicy + +func (l7 L7DataMap) MarshalJSON() ([]byte, error) { + if len(l7) == 0 { + return []byte("[]"), nil + } + + /* First, create a sorted slice of the selectors so we can get + * consistent JSON output */ + selectors := make(CachedSelectorSlice, 0, len(l7)) + for cs := range l7 { + selectors = append(selectors, cs) + } + sort.Sort(selectors) + + /* Now we can iterate the slice and generate JSON entries. */ + var err error + buffer := bytes.NewBufferString("[") + for _, cs := range selectors { + buffer.WriteString("{\"") + buffer.WriteString(cs.String()) + buffer.WriteString("\":") + b, err := json.Marshal(l7[cs]) + if err == nil { + buffer.Write(b) + } else { + buffer.WriteString("\"L7DataMap error: ") + buffer.WriteString(err.Error()) + buffer.WriteString("\"") + } + buffer.WriteString("},") + } + buffer.Truncate(buffer.Len() - 1) // Drop the final "," + buffer.WriteString("]") + + return buffer.Bytes(), err +} + +// ShallowCopy returns a shallow copy of the L7DataMap. +func (l7 L7DataMap) ShallowCopy() L7DataMap { + m := make(L7DataMap, len(l7)) + for k, v := range l7 { + m[k] = v + } + return m +} + +// L7ParserType is the type used to indicate what L7 parser to use. +// Consts are defined for all well known L7 parsers. +// Unknown string values are created for key-value pair policies, which +// are then transparently used in redirect configuration. +type L7ParserType string + +func (l7 L7ParserType) String() string { + return (string)(l7) +} + +const ( + // ParserTypeNone represents the case where no parser type is provided. + ParserTypeNone L7ParserType = "" + // ParserTypeTLS is used for TLS origination, termination, or SNI filtering without any L7 + // parsing. If TLS policies are used with HTTP rules, ParserTypeHTTP is used instead. + ParserTypeTLS L7ParserType = "tls" + // ParserTypeCRD is used with a custom CiliumEnvoyConfig redirection. Incompatible with any + // parser type with L7 enforcement (HTTP, Kafka, proxylib), as the custom Listener generally + // does not support them. 
+	ParserTypeCRD L7ParserType = "crd"
+	// ParserTypeHTTP specifies a HTTP parser type
+	ParserTypeHTTP L7ParserType = "http"
+	// ParserTypeKafka specifies a Kafka parser type
+	ParserTypeKafka L7ParserType = "kafka"
+	// ParserTypeDNS specifies a DNS parser type
+	ParserTypeDNS L7ParserType = "dns"
+)
+
+// redirectTypes is a bitmask of redirection types of multiple filters
+type redirectTypes uint16
+
+const (
+	// redirectTypeDNS bit is set when policy contains a redirection to DNS proxy
+	redirectTypeDNS redirectTypes = 1 << iota
+	// redirectTypeEnvoy bit is set when policy contains a redirection to Envoy
+	redirectTypeEnvoy
+	// redirectTypeProxylib bits are set when policy contains a redirection to Proxylib (via
+	// Envoy)
+	redirectTypeProxylib redirectTypes = 1<<iota | redirectTypeEnvoy
+
+	// redirectTypeNone represents the case where there is no proxy redirect
+	redirectTypeNone redirectTypes = redirectTypes(0)
+)
+
+func (l7 L7DataMap) addPolicyForSelector(rules *api.L7Rules, terminatingTLS, originatingTLS *TLSContext, auth *api.Authentication, deny bool, sni []string, forceRedirect bool) {
+	isRedirect := !deny && (forceRedirect || terminatingTLS != nil || originatingTLS != nil || len(sni) > 0 || !rules.IsEmpty())
+	for epsel := range l7 {
+		l7policy := &PerSelectorPolicy{
+			TerminatingTLS: terminatingTLS,
+			OriginatingTLS: originatingTLS,
+			Authentication: auth,
+			IsDeny:         deny,
+			ServerNames:    NewStringSet(sni),
+			isRedirect:     isRedirect,
+		}
+		if rules != nil {
+			l7policy.L7Rules = *rules
+		}
+		l7[epsel] = l7policy
+	}
+}
+
+type TLSDirection string
+
+const (
+	TerminatingTLS TLSDirection = "terminating"
+	OriginatingTLS TLSDirection = "originating"
+)
+
+func (l4 *L4Filter) getCerts(policyCtx PolicyContext, tls *api.TLSContext, direction TLSDirection) (*TLSContext, error) {
+	if tls == nil {
+		return nil, nil
+	}
+	ca, public, private, err := policyCtx.GetTLSContext(tls)
+	if err != nil {
+		log.WithError(err).Warningf("policy: Error getting %s TLS Context.", direction)
+		return nil, err
+	}
+	switch direction {
+	case TerminatingTLS:
+		if public == "" || private == "" {
+			return nil, fmt.Errorf("Terminating TLS context is missing certs.")
+		}
+	case OriginatingTLS:
+		if ca == "" {
+			return nil, fmt.Errorf("Originating TLS context is missing CA certs.")
+		}
+	default:
+		return nil, fmt.Errorf("invalid TLS direction: %s", direction)
+	}
+
+	return &TLSContext{
+		TrustedCA:        ca,
+		CertificateChain: public,
+		PrivateKey:       private,
+	}, nil
+}
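The bitmask above folds the Envoy bit into redirectTypeProxylib, since proxylib redirects are implemented via Envoy; a policy whose only redirect is proxylib therefore also reads as an Envoy redirect (compare HasEnvoyRedirect and HasProxylibRedirect further below). A standalone sketch of that composition, with the constants redeclared locally so it runs outside this package:

	package main

	import "fmt"

	type redirectTypes uint16

	const (
		redirectTypeDNS   redirectTypes = 1 << iota // bit 0
		redirectTypeEnvoy                           // bit 1
		// Proxylib redirects go through Envoy, so the Envoy bit is folded in.
		redirectTypeProxylib redirectTypes = 1<<iota | redirectTypeEnvoy
	)

	func main() {
		p := redirectTypeProxylib
		fmt.Println(p&redirectTypeEnvoy != 0) // true: proxylib implies Envoy
		fmt.Println(p&redirectTypeDNS != 0)   // false
	}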
+// createL4Filter creates a filter for L4 policy that applies to the specified
+// endpoints and port/protocol, with reference to the original rules that the
+// filter is derived from. This filter may be associated with a series of L7
+// rules via the `rule` parameter.
+// Not called with an empty peerEndpoints.
+func createL4Filter(policyCtx PolicyContext, peerEndpoints api.EndpointSelectorSlice, auth *api.Authentication, rule api.Ports, port api.PortProtocol,
+	protocol api.L4Proto, ruleLabels labels.LabelArray, ingress bool, fqdns api.FQDNSelectorSlice) (*L4Filter, error) {
+	selectorCache := policyCtx.GetSelectorCache()
+
+	portName := ""
+	p := uint64(0)
+	if iana.IsSvcName(port.Port) {
+		portName = port.Port
+	} else {
+		// already validated via PortRule.Validate()
+		p, _ = strconv.ParseUint(port.Port, 0, 16)
+	}
+
+	// already validated via L4Proto.Validate(), never "ANY"
+	u8p, _ := u8proto.ParseProtocol(string(protocol))
+
+	l4 := &L4Filter{
+		Port:                int(p),   // 0 for L3-only rules and named ports
+		PortName:            portName, // non-"" for named ports
+		Protocol:            protocol,
+		U8Proto:             u8p,
+		PerSelectorPolicies: make(L7DataMap),
+		RuleOrigin:          make(map[CachedSelector]labels.LabelArrayList), // Filled in below.
+		Ingress:             ingress,
+	}
+
+	if peerEndpoints.SelectsAllEndpoints() {
+		l4.wildcard = l4.cacheIdentitySelector(api.WildcardEndpointSelector, ruleLabels, selectorCache)
+	} else {
+		l4.cacheIdentitySelectors(peerEndpoints, ruleLabels, selectorCache)
+		l4.cacheFQDNSelectors(fqdns, ruleLabels, selectorCache)
+	}
+
+	var terminatingTLS *TLSContext
+	var originatingTLS *TLSContext
+	var rules *api.L7Rules
+	var sni []string
+	forceRedirect := false
+	pr := rule.GetPortRule()
+	if pr != nil {
+		rules = pr.Rules
+		sni = pr.ServerNames
+
+		// Get TLS contexts, if any
+		var err error
+		terminatingTLS, err = l4.getCerts(policyCtx, pr.TerminatingTLS, TerminatingTLS)
+		if err != nil {
+			return nil, err
+		}
+		originatingTLS, err = l4.getCerts(policyCtx, pr.OriginatingTLS, OriginatingTLS)
+		if err != nil {
+			return nil, err
+		}
+
+		// Set parser type to TLS, if TLS. This will be overridden by L7 below, if rules
+		// exist.
+		if terminatingTLS != nil || originatingTLS != nil || len(pr.ServerNames) > 0 {
+			l4.L7Parser = ParserTypeTLS
+		}
+
+		// Determine L7ParserType from rules present. Earlier validation ensures rules
+		// for multiple protocols are not present here.
+		if rules != nil {
+			// we need this to redirect DNS UDP (or ANY, which is more useful)
+			if len(rules.DNS) > 0 {
+				l4.L7Parser = ParserTypeDNS
+			} else if protocol == api.ProtoTCP { // Other than DNS only support TCP
+				switch {
+				case len(rules.HTTP) > 0:
+					l4.L7Parser = ParserTypeHTTP
+				case len(rules.Kafka) > 0:
+					l4.L7Parser = ParserTypeKafka
+				case rules.L7Proto != "":
+					l4.L7Parser = (L7ParserType)(rules.L7Proto)
+				}
+			}
+		}
+
+		// Override the parser type to CRD if applicable.
+		if pr.Listener != nil {
+			l4.L7Parser = ParserTypeCRD
+			ns := policyCtx.GetNamespace()
+			resource := pr.Listener.EnvoyConfig
+			switch resource.Kind {
+			case "CiliumEnvoyConfig":
+				if ns == "" {
+					// Cluster-scoped CCNP tries to use namespaced
+					// CiliumEnvoyConfig
+					//
+					// TODO: Catch this in rule validation once we have a
+					// validation context in there so that we can differentiate
+					// between CNP and CCNP at validation time.
+					return nil, fmt.Errorf("Listener %q in CCNP can not use Kind CiliumEnvoyConfig", pr.Listener.Name)
+				}
+			case "CiliumClusterwideEnvoyConfig":
+				// CNP refers to a cluster-scoped listener
+				ns = ""
+			default:
+			}
+			l4.Listener, _ = api.ResourceQualifiedName(ns, resource.Name, pr.Listener.Name, api.ForceNamespace)
+			forceRedirect = true
+		}
+	}
+
+	if l4.L7Parser != ParserTypeNone || auth != nil || policyCtx.IsDeny() {
+		l4.PerSelectorPolicies.addPolicyForSelector(rules, terminatingTLS, originatingTLS, auth, policyCtx.IsDeny(), sni, forceRedirect)
+	}
+
+	for cs := range l4.PerSelectorPolicies {
+		l4.RuleOrigin[cs] = labels.LabelArrayList{ruleLabels}
+	}
+
+	return l4, nil
+}
+
+func (l4 *L4Filter) removeSelectors(selectorCache *SelectorCache) {
+	selectors := make(CachedSelectorSlice, 0, len(l4.PerSelectorPolicies))
+	for cs := range l4.PerSelectorPolicies {
+		selectors = append(selectors, cs)
+	}
+	selectorCache.RemoveSelectors(selectors, l4)
+}
+
+// detach releases the references held in the L4Filter and must be called before
+// the filter is left to be garbage collected.
+// L4Filter may still be accessed concurrently after it has been detached.
+func (l4 *L4Filter) detach(selectorCache *SelectorCache) {
+	l4.removeSelectors(selectorCache)
+	l4.policy.Store(nil)
+}
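detach above clears the filter's back-pointer while attach (next) publishes it; readers then load it through an atomic.Pointer without taking locks. A minimal standalone sketch of that publish/clear pattern, using simplified stand-in types (hypothetical names, not the vendored ones):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	type l4Policy struct{ revision uint64 }

	type l4Filter struct {
		policy atomic.Pointer[l4Policy] // back-pointer published by attach
	}

	// attach publishes the owning policy; afterwards readers may Load it
	// concurrently without locks.
	func (f *l4Filter) attach(p *l4Policy) { f.policy.Store(p) }

	// detach clears the back-pointer so the policy can be garbage collected.
	func (f *l4Filter) detach() { f.policy.Store(nil) }

	func main() {
		f := &l4Filter{}
		f.attach(&l4Policy{revision: 42})
		if p := f.policy.Load(); p != nil {
			fmt.Println("attached at revision", p.revision) // attached at revision 42
		}
		f.detach()
		fmt.Println(f.policy.Load() == nil) // true
	}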
+// attach signifies that the L4Filter is ready and reachable for updates
+// from SelectorCache. L4Filter (and L4Policy) is read-only after this is called,
+// multiple goroutines will be reading the fields from that point on.
+func (l4 *L4Filter) attach(ctx PolicyContext, l4Policy *L4Policy) policyFeatures {
+	// All rules have been added to the L4Filter at this point.
+	// Sort the rules label array list for more efficient equality comparison.
+	for _, labels := range l4.RuleOrigin {
+		labels.Sort()
+	}
+
+	var features policyFeatures
+	for cs, cp := range l4.PerSelectorPolicies {
+		if cp != nil {
+			if cp.IsDeny {
+				features.setFeature(denyRules)
+			}
+
+			hasAuth, authType := GetAuthType(cp.Authentication)
+			if hasAuth {
+				features.setFeature(authRules)
+
+				if authType != AuthTypeDisabled {
+					if l4Policy.AuthMap == nil {
+						l4Policy.AuthMap = make(AuthMap, 1)
+					}
+					authTypes := l4Policy.AuthMap[cs]
+					if authTypes == nil {
+						authTypes = make(AuthTypes, 1)
+					}
+					authTypes[authType] = struct{}{}
+					l4Policy.AuthMap[cs] = authTypes
+				}
+			}
+
+			// Compute Envoy policies when a policy is ready to be used
+			if len(cp.L7Rules.HTTP) > 0 {
+				cp.EnvoyHTTPRules, cp.CanShortCircuit = ctx.GetEnvoyHTTPRules(&cp.L7Rules)
+			}
+		}
+	}
+
+	l4.policy.Store(l4Policy)
+	return features
+}
+
+// createL4IngressFilter creates a filter for L4 policy that applies to the
+// specified endpoints and port/protocol for ingress traffic, with reference
+// to the original rules that the filter is derived from. This filter may be
+// associated with a series of L7 rules via the `rule` parameter.
+//
+// hostWildcardL7 determines if L7 traffic from Host should be
+// wildcarded (in the relevant daemon mode).
+func createL4IngressFilter(policyCtx PolicyContext, fromEndpoints api.EndpointSelectorSlice, auth *api.Authentication, hostWildcardL7 []string, rule api.Ports, port api.PortProtocol,
+	protocol api.L4Proto, ruleLabels labels.LabelArray) (*L4Filter, error) {
+
+	filter, err := createL4Filter(policyCtx, fromEndpoints, auth, rule, port, protocol, ruleLabels, true, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// If the filter would apply proxy redirection for the Host and we should accept
+	// everything from the host, then wildcard Host at L7.
+	if len(hostWildcardL7) > 0 {
+		for cs, l7 := range filter.PerSelectorPolicies {
+			if l7.IsRedirect() && cs.Selects(identity.ReservedIdentityHost) {
+				for _, name := range hostWildcardL7 {
+					selector := api.ReservedEndpointSelectors[name]
+					filter.cacheIdentitySelector(selector, ruleLabels, policyCtx.GetSelectorCache())
+				}
+			}
+		}
+	}
+
+	return filter, nil
+}
+
+// createL4EgressFilter creates a filter for L4 policy that applies to the
+// specified endpoints and port/protocol for egress traffic, with reference
+// to the original rules that the filter is derived from. This filter may be
+// associated with a series of L7 rules via the `rule` parameter.
+func createL4EgressFilter(policyCtx PolicyContext, toEndpoints api.EndpointSelectorSlice, auth *api.Authentication, rule api.Ports, port api.PortProtocol, + protocol api.L4Proto, ruleLabels labels.LabelArray, fqdns api.FQDNSelectorSlice) (*L4Filter, error) { + + return createL4Filter(policyCtx, toEndpoints, auth, rule, port, protocol, ruleLabels, false, fqdns) +} + +// redirectType returns the redirectType for this filter +func (l4 *L4Filter) redirectType() redirectTypes { + switch l4.L7Parser { + case ParserTypeNone: + return redirectTypeNone + case ParserTypeDNS: + return redirectTypeDNS + case ParserTypeHTTP, ParserTypeTLS, ParserTypeCRD: + return redirectTypeEnvoy + default: + // all other (non-empty) values are used for proxylib redirects + return redirectTypeProxylib + } +} + +// IsRedirect returns true if the L4 filter contains a port redirection +func (l4 *L4Filter) IsRedirect() bool { + return l4.L7Parser != ParserTypeNone +} + +// Marshal returns the `L4Filter` in a JSON string. +func (l4 *L4Filter) Marshal() string { + b, err := json.Marshal(l4) + if err != nil { + b = []byte("\"L4Filter error: " + err.Error() + "\"") + } + return string(b) +} + +// String returns the `L4Filter` in a human-readable string. +func (l4 *L4Filter) String() string { + b, err := json.Marshal(l4) + if err != nil { + return err.Error() + } + return string(b) +} + +// Note: Only used for policy tracing +func (l4 *L4Filter) matchesLabels(labels labels.LabelArray) (bool, bool) { + if l4.wildcard != nil { + perSelectorPolicy := l4.PerSelectorPolicies[l4.wildcard] + isDeny := perSelectorPolicy != nil && perSelectorPolicy.IsDeny + return true, isDeny + } else if len(labels) == 0 { + return false, false + } + + var selected bool + for sel, rule := range l4.PerSelectorPolicies { + // slow, but OK for tracing + idSel := sel.(*identitySelector) + if lis, ok := idSel.source.(*labelIdentitySelector); ok && lis.xxxMatches(labels) { + isDeny := rule != nil && rule.IsDeny + selected = true + if isDeny { + return true, isDeny + } + } + } + return selected, false +} + +// addL4Filter adds 'filterToMerge' into the 'resMap'. Returns an error if it +// the 'filterToMerge' can't be merged with an existing filter for the same +// port and proto. +func addL4Filter(policyCtx PolicyContext, + ctx *SearchContext, resMap L4PolicyMap, + p api.PortProtocol, proto api.L4Proto, + filterToMerge *L4Filter, + ruleLabels labels.LabelArray) error { + + key := p.Port + "/" + string(proto) + existingFilter, ok := resMap[key] + if !ok { + resMap[key] = filterToMerge + return nil + } + + selectorCache := policyCtx.GetSelectorCache() + if err := mergePortProto(ctx, existingFilter, filterToMerge, selectorCache); err != nil { + filterToMerge.detach(selectorCache) + return err + } + + // To keep the rule origin tracking correct, merge the rule label arrays for each CachedSelector + // we know about. New CachedSelectors are added. 
+ for cs, newLabels := range filterToMerge.RuleOrigin { + if existingLabels, ok := existingFilter.RuleOrigin[cs]; ok { + existingFilter.RuleOrigin[cs] = existingLabels.MergeSorted(newLabels) + } else { + existingFilter.RuleOrigin[cs] = newLabels + } + } + + resMap[key] = existingFilter + return nil +} + +// L4PolicyMap is a list of L4 filters indexable by protocol/port +// key format: "port/proto" +type L4PolicyMap map[string]*L4Filter + +type policyFeatures uint8 + +const ( + denyRules policyFeatures = 1 << iota + authRules + + allFeatures policyFeatures = ^policyFeatures(0) +) + +func (pf *policyFeatures) setFeature(feature policyFeatures) { + *pf |= feature +} + +func (pf policyFeatures) contains(feature policyFeatures) bool { + return pf&feature != 0 +} + +type L4DirectionPolicy struct { + PortRules L4PolicyMap + + // features tracks properties of PortRules to skip code when features are not used + features policyFeatures +} + +func newL4DirectionPolicy() L4DirectionPolicy { + return L4DirectionPolicy{ + PortRules: L4PolicyMap{}, + } +} + +// Detach removes the cached selectors held by L4PolicyMap from the +// selectorCache, allowing the map to be garbage collected when there +// are no more references to it. +func (l4 L4DirectionPolicy) Detach(selectorCache *SelectorCache) { + l4.PortRules.Detach(selectorCache) +} + +// detach is used directly from tracing and testing functions +func (l4 L4PolicyMap) Detach(selectorCache *SelectorCache) { + for _, f := range l4 { + f.detach(selectorCache) + } +} + +// Attach makes all the L4Filters to point back to the L4Policy that contains them. +// This is done before the L4PolicyMap is exposed to concurrent access. +// Returns the bitmask of all redirect types for this policymap. +func (l4 *L4DirectionPolicy) attach(ctx PolicyContext, l4Policy *L4Policy) redirectTypes { + var redirectTypes redirectTypes + var features policyFeatures + for _, f := range l4.PortRules { + features |= f.attach(ctx, l4Policy) + redirectTypes |= f.redirectType() + } + l4.features = features + return redirectTypes +} + +// containsAllL3L4 checks if the L4PolicyMap contains all L4 ports in `ports`. +// For L4Filters that specify ToEndpoints or FromEndpoints, uses `labels` to +// determine whether the policy allows L4 communication between the corresponding +// endpoints. +// Returns api.Denied in the following conditions: +// - If a single port is not present in the `L4PolicyMap` and is not allowed +// by the distilled L3 policy +// - If a port is present in the `L4PolicyMap`, but it applies ToEndpoints or +// FromEndpoints constraints that require labels not present in `labels`. +// +// Otherwise, returns api.Allowed. +// +// Note: Only used for policy tracing +func (l4 L4PolicyMap) containsAllL3L4(labels labels.LabelArray, ports []*models.Port) api.Decision { + if len(l4) == 0 { + return api.Allowed + } + + // Check L3-only filters first. 
+ filter, match := l4[api.PortProtocolAny] + if match { + + matches, isDeny := filter.matchesLabels(labels) + switch { + case matches && isDeny: + return api.Denied + case matches: + return api.Allowed + } + } + + for _, l4Ctx := range ports { + portStr := l4Ctx.Name + if !iana.IsSvcName(portStr) { + portStr = strconv.FormatUint(uint64(l4Ctx.Port), 10) + } + lwrProtocol := l4Ctx.Protocol + var isUDPDeny, isTCPDeny, isSCTPDeny bool + switch lwrProtocol { + case "", models.PortProtocolANY: + tcpPort := portStr + "/TCP" + tcpFilter, tcpmatch := l4[tcpPort] + if tcpmatch { + tcpmatch, isTCPDeny = tcpFilter.matchesLabels(labels) + } + + udpPort := portStr + "/UDP" + udpFilter, udpmatch := l4[udpPort] + if udpmatch { + udpmatch, isUDPDeny = udpFilter.matchesLabels(labels) + } + + sctpPort := portStr + "/SCTP" + sctpFilter, sctpmatch := l4[sctpPort] + if sctpmatch { + sctpmatch, isSCTPDeny = sctpFilter.matchesLabels(labels) + } + + if (!tcpmatch && !udpmatch && !sctpmatch) || (isTCPDeny && isUDPDeny && isSCTPDeny) { + return api.Denied + } + default: + port := portStr + "/" + lwrProtocol + filter, match := l4[port] + if !match { + return api.Denied + } + matches, isDeny := filter.matchesLabels(labels) + if !matches || isDeny { + return api.Denied + } + } + } + return api.Allowed +} + +type L4Policy struct { + Ingress L4DirectionPolicy + Egress L4DirectionPolicy + + AuthMap AuthMap + + // Revision is the repository revision used to generate this policy. + Revision uint64 + + // redirectTypes is a bitmap containing the types of redirect contained by this policy. It + // is computed after the policy maps to avoid scanning them repeatedly when using the + // L4Policy + redirectTypes redirectTypes + + // Endpoint policies using this L4Policy + // These are circular references, cleaned up in Detach() + // This mutex is taken while Endpoint mutex is held, so Endpoint lock + // MUST always be taken before this mutex. + mutex lock.RWMutex + users map[*EndpointPolicy]struct{} +} + +// NewL4Policy creates a new L4Policy +func NewL4Policy(revision uint64) L4Policy { + return L4Policy{ + Ingress: newL4DirectionPolicy(), + Egress: newL4DirectionPolicy(), + Revision: revision, + users: make(map[*EndpointPolicy]struct{}), + } +} + +// insertUser adds a user to the L4Policy so that incremental +// updates of the L4Policy may be forwarded to the users of it. +func (l4 *L4Policy) insertUser(user *EndpointPolicy) { + l4.mutex.Lock() + + // 'users' is set to nil when the policy is detached. This + // happens to the old policy when it is being replaced with a + // new one, or when the last endpoint using this policy is + // removed. + // In the case of an policy update it is possible that an + // endpoint has started regeneration before the policy was + // updated, and that the policy was updated before the said + // endpoint reached this point. In this case the endpoint's + // policy is going to be recomputed soon after and we do + // nothing here. + if l4.users != nil { + l4.users[user] = struct{}{} + } + + l4.mutex.Unlock() +} + +// removeUser removes a user that no longer needs incremental updates +// from the L4Policy. +func (l4 *L4Policy) removeUser(user *EndpointPolicy) { + // 'users' is set to nil when the policy is detached. This + // happens to the old policy when it is being replaced with a + // new one, or when the last endpoint using this policy is + // removed. 
+ l4.mutex.Lock() + if l4.users != nil { + delete(l4.users, user) + } + l4.mutex.Unlock() +} + +// AccumulateMapChanges distributes the given changes to the registered users. +// +// The caller is responsible for making sure the same identity is not +// present in both 'adds' and 'deletes'. +func (l4Policy *L4Policy) AccumulateMapChanges(l4 *L4Filter, cs CachedSelector, adds, deletes []identity.NumericIdentity) { + port := uint16(l4.Port) + proto := uint8(l4.U8Proto) + derivedFrom := l4.RuleOrigin[cs] + + direction := trafficdirection.Egress + if l4.Ingress { + direction = trafficdirection.Ingress + } + perSelectorPolicy := l4.PerSelectorPolicies[cs] + redirect := perSelectorPolicy.IsRedirect() + hasAuth, authType := perSelectorPolicy.GetAuthType() + isDeny := perSelectorPolicy != nil && perSelectorPolicy.IsDeny + + // Must take a copy of 'users' as GetNamedPort() will lock the Endpoint below and + // the Endpoint lock may not be taken while 'l4.mutex' is held. + l4Policy.mutex.RLock() + users := make(map[*EndpointPolicy]struct{}, len(l4Policy.users)) + for user := range l4Policy.users { + users[user] = struct{}{} + } + l4Policy.mutex.RUnlock() + + for epPolicy := range users { + // Skip if endpoint has no policy maps + if !epPolicy.PolicyOwner.HasBPFPolicyMap() { + continue + } + // resolve named port + if port == 0 && l4.PortName != "" { + port = epPolicy.PolicyOwner.GetNamedPort(l4.Ingress, l4.PortName, proto) + if port == 0 { + continue + } + } + key := Key{DestPort: port, Nexthdr: proto, TrafficDirection: direction.Uint8()} + value := NewMapStateEntry(cs, derivedFrom, redirect, isDeny, hasAuth, authType) + + if option.Config.Debug { + authString := "default" + if hasAuth { + authString = authType.String() + } + log.WithFields(logrus.Fields{ + logfields.EndpointSelector: cs, + logfields.AddedPolicyID: adds, + logfields.DeletedPolicyID: deletes, + logfields.Port: port, + logfields.Protocol: proto, + logfields.TrafficDirection: direction, + logfields.IsRedirect: redirect, + logfields.AuthType: authString, + }).Debug("AccumulateMapChanges") + } + epPolicy.policyMapChanges.AccumulateMapChanges(cs, adds, deletes, key, value) + } +} + +// Detach makes the L4Policy ready for garbage collection, removing +// circular pointer references. +// Note that the L4Policy itself is not modified in any way, so that it may still +// be used concurrently. +func (l4 *L4Policy) Detach(selectorCache *SelectorCache) { + l4.Ingress.Detach(selectorCache) + l4.Egress.Detach(selectorCache) + + l4.mutex.Lock() + l4.users = nil + l4.mutex.Unlock() +} + +// Attach makes all the L4Filters to point back to the L4Policy that contains them. +// This is done before the L4Policy is exposed to concurrent access. +func (l4 *L4Policy) Attach(ctx PolicyContext) { + ingressRedirects := l4.Ingress.attach(ctx, l4) + egressRedirects := l4.Egress.attach(ctx, l4) + l4.redirectTypes = ingressRedirects | egressRedirects +} + +// IngressCoversContext checks if the receiver's ingress L4Policy contains +// all `dPorts` and `labels`. +// +// Note: Only used for policy tracing +func (l4 *L4PolicyMap) IngressCoversContext(ctx *SearchContext) api.Decision { + return l4.containsAllL3L4(ctx.From, ctx.DPorts) +} + +// EgressCoversContext checks if the receiver's egress L4Policy contains +// all `dPorts` and `labels`. 
+// +// Note: Only used for policy tracing +func (l4 *L4PolicyMap) EgressCoversContext(ctx *SearchContext) api.Decision { + return l4.containsAllL3L4(ctx.To, ctx.DPorts) +} + +// HasRedirect returns true if the L4 policy contains at least one port redirection +func (l4 *L4Policy) HasRedirect() bool { + return l4 != nil && l4.redirectTypes != redirectTypeNone +} + +// HasEnvoyRedirect returns true if the L4 policy contains at least one port redirection to Envoy +func (l4 *L4Policy) HasEnvoyRedirect() bool { + return l4 != nil && l4.redirectTypes&redirectTypeEnvoy == redirectTypeEnvoy +} + +// HasProxylibRedirect returns true if the L4 policy contains at least one port redirection to Proxylib +func (l4 *L4Policy) HasProxylibRedirect() bool { + return l4 != nil && l4.redirectTypes&redirectTypeProxylib == redirectTypeProxylib +} + +func (l4 *L4Policy) GetModel() *models.L4Policy { + if l4 == nil { + return nil + } + + ingress := []*models.PolicyRule{} + for _, v := range l4.Ingress.PortRules { + rulesBySelector := map[string][][]string{} + derivedFrom := labels.LabelArrayList{} + for sel, rules := range v.RuleOrigin { + derivedFrom.MergeSorted(rules) + rulesBySelector[sel.String()] = rules.GetModel() + } + ingress = append(ingress, &models.PolicyRule{ + Rule: v.Marshal(), + DerivedFromRules: derivedFrom.GetModel(), + RulesBySelector: rulesBySelector, + }) + } + + egress := []*models.PolicyRule{} + for _, v := range l4.Egress.PortRules { + derivedFrom := labels.LabelArrayList{} + for _, rules := range v.RuleOrigin { + derivedFrom.MergeSorted(rules) + } + egress = append(egress, &models.PolicyRule{ + Rule: v.Marshal(), + DerivedFromRules: derivedFrom.GetModel(), + }) + } + + return &models.L4Policy{ + Ingress: ingress, + Egress: egress, + } +} + +// ProxyPolicy is any type which encodes state needed to redirect to an L7 +// proxy. +type ProxyPolicy interface { + CopyL7RulesPerEndpoint() L7DataMap + GetL7Parser() L7ParserType + GetIngress() bool + GetPort() uint16 + GetListener() string +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go b/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go new file mode 100644 index 0000000000..e1c408e11a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go @@ -0,0 +1,1456 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package policy + +import ( + "fmt" + "net" + "slices" + "strconv" + "testing" + + "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" + + "github.com/cilium/cilium/pkg/identity" + "github.com/cilium/cilium/pkg/ip" + "github.com/cilium/cilium/pkg/labels" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/policy/trafficdirection" +) + +var ( + // localHostKey represents an ingress L3 allow from the local host. + localHostKey = Key{ + Identity: identity.ReservedIdentityHost.Uint32(), + TrafficDirection: trafficdirection.Ingress.Uint8(), + } + // localRemoteNodeKey represents an ingress L3 allow from remote nodes. + localRemoteNodeKey = Key{ + Identity: identity.ReservedIdentityRemoteNode.Uint32(), + TrafficDirection: trafficdirection.Ingress.Uint8(), + } + // allKey represents a key for unknown traffic, i.e., all traffic. 
+	allKey = Key{
+		Identity: identity.IdentityUnknown.Uint32(),
+	}
+)
+
+const (
+	LabelKeyPolicyDerivedFrom   = "io.cilium.policy.derived-from"
+	LabelAllowLocalHostIngress  = "allow-localhost-ingress"
+	LabelAllowRemoteHostIngress = "allow-remotehost-ingress"
+	LabelAllowAnyIngress        = "allow-any-ingress"
+	LabelAllowAnyEgress         = "allow-any-egress"
+	LabelVisibilityAnnotation   = "visibility-annotation"
+)
+
+// MapState is a map interface for policy maps
+type MapState interface {
+	Get(Key) (MapStateEntry, bool)
+	Insert(Key, MapStateEntry)
+	Delete(Key)
+	// ForEach allows iteration over the MapStateEntries. It returns true iff
+	// the iteration was not stopped early by the callback.
+	ForEach(func(Key, MapStateEntry) (cont bool)) (complete bool)
+	// ForEachAllow behaves like ForEach, but only iterates MapStateEntries which are not denies.
+	ForEachAllow(func(Key, MapStateEntry) (cont bool)) (complete bool)
+	// ForEachDeny behaves like ForEach, but only iterates MapStateEntries which are denies.
+	ForEachDeny(func(Key, MapStateEntry) (cont bool)) (complete bool)
+	GetIdentities(*logrus.Logger) ([]int64, []int64)
+	GetDenyIdentities(*logrus.Logger) ([]int64, []int64)
+	RevertChanges(ChangeState)
+	AddVisibilityKeys(PolicyOwner, uint16, *VisibilityMetadata, ChangeState)
+	Len() int
+	Equals(MapState) bool
+	Diff(t *testing.T, expected MapState) string
+
+	allowAllIdentities(ingress, egress bool)
+	determineAllowLocalhostIngress()
+	deniesL4(policyOwner PolicyOwner, l4 *L4Filter) bool
+	denyPreferredInsertWithChanges(newKey Key, newEntry MapStateEntry, identities Identities, features policyFeatures, changes ChangeState)
+	deleteKeyWithChanges(key Key, owner MapStateOwner, changes ChangeState)
+}
+
+// mapState is a state of a policy map.
+type mapState struct {
+	allows mapStateMap
+	denies mapStateMap
+}
+
+// mapStateMap is a convenience type representing the actual structure mapping
+// policymap keys to policymap entries.
+type mapStateMap map[Key]MapStateEntry
+
+func (m mapStateMap) insert(k Key, e MapStateEntry) {
+	if m == nil {
+		n := make(mapStateMap)
+		m = n
+	}
+	m[k] = e
+}
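The Key, MapStateEntry, and constructor definitions follow just below; a hedged usage sketch of this MapState interface, assuming the module's canonical import path (github.com/cilium/cilium rather than the vendored mirror path) and the signatures exactly as vendored here:

	package main

	import (
		"fmt"

		"github.com/cilium/cilium/pkg/policy"
		"github.com/cilium/cilium/pkg/policy/trafficdirection"
	)

	func main() {
		// Ingress allow for identity 1234 on 80/TCP (Nexthdr 6).
		key := policy.Key{
			Identity:         1234,
			DestPort:         80,
			Nexthdr:          6,
			TrafficDirection: trafficdirection.Ingress.Uint8(),
		}
		// No owner, no derived-from labels, no redirect, no deny, default auth.
		entry := policy.NewMapStateEntry(nil, nil, false, false,
			policy.DefaultAuthType, policy.AuthTypeDisabled)

		ms := policy.NewMapState(nil)
		ms.Insert(key, entry)

		if got, ok := ms.Get(key); ok {
			fmt.Println(got.IsDeny, ms.Len()) // false 1
		}
	}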
+type Identities interface {
+	GetNetsLocked(identity.NumericIdentity) []*net.IPNet
+}
+
+// Key is the userspace representation of a policy key in BPF. It is
+// intentionally duplicated from pkg/maps/policymap to avoid pulling in the
+// BPF dependency to this package.
+type Key struct {
+	// Identity is the numeric identity to / from which traffic is allowed.
+	Identity uint32
+	// DestPort is the port at L4 to / from which traffic is allowed, in
+	// host-byte order.
+	DestPort uint16
+	// Nexthdr is the protocol which is allowed.
+	Nexthdr uint8
+	// TrafficDirection indicates in which direction Identity is allowed
+	// communication (egress or ingress).
+	TrafficDirection uint8
+}
+
+// String returns a string representation of the Key
+func (k Key) String() string {
+	return "Identity=" + strconv.FormatUint(uint64(k.Identity), 10) +
+		",DestPort=" + strconv.FormatUint(uint64(k.DestPort), 10) +
+		",Nexthdr=" + strconv.FormatUint(uint64(k.Nexthdr), 10) +
+		",TrafficDirection=" + strconv.FormatUint(uint64(k.TrafficDirection), 10)
+}
+
+// IsIngress returns true if the key refers to an ingress policy key
+func (k Key) IsIngress() bool {
+	return k.TrafficDirection == trafficdirection.Ingress.Uint8()
+}
+
+// IsEgress returns true if the key refers to an egress policy key
+func (k Key) IsEgress() bool {
+	return k.TrafficDirection == trafficdirection.Egress.Uint8()
+}
+
+// PortProtoIsBroader returns true if the receiver Key has broader
+// port-protocol than the argument Key. That is a port-protocol
+// that covers the argument Key's port-protocol and is larger.
+// An equal port-protocol will return false.
+func (k Key) PortProtoIsBroader(c Key) bool {
+	return k.DestPort == 0 && c.DestPort != 0 ||
+		k.Nexthdr == 0 && c.Nexthdr != 0
+}
+
+// PortProtoIsEqual returns true if the port-protocols of the
+// two keys are exactly equal.
+func (k Key) PortProtoIsEqual(c Key) bool {
+	return k.DestPort == c.DestPort && k.Nexthdr == c.Nexthdr
+}
+
+type Keys map[Key]struct{}
+
+type MapStateOwner interface{}
+
+// MapStateEntry is the configuration associated with a Key in a
+// MapState. This is a minimized version of policymap.PolicyEntry.
+type MapStateEntry struct {
+	// The proxy port, in host byte order.
+	// If 0 (default), there is no proxy redirection for the corresponding
+	// Key. Any other value signifies proxy redirection.
+	ProxyPort uint16
+
+	// IsDeny is true when the policy should be denied.
+	IsDeny bool
+
+	// hasAuthType is 'DefaultAuthType' when policy has no explicit AuthType set. In this case the
+	// value of AuthType is derived from more generic entries covering this entry.
+	hasAuthType HasAuthType
+
+	// AuthType is non-zero when authentication is required for the traffic to be allowed.
+	AuthType AuthType
+
+	// DerivedFromRules tracks the policy rules this entry derives from
+	// In sorted order.
+	DerivedFromRules labels.LabelArrayList
+
+	// Owners collects the keys in the map and selectors in the policy that require this key to be present.
+	// TODO: keep track which selector needed the entry to be deny, redirect, or just allow.
+	owners map[MapStateOwner]struct{}
+
+	// dependents contains the keys for entries created based on this entry. These entries
+	// will be deleted once all of the owners are deleted.
+	dependents Keys
+}
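PortProtoIsBroader above is a strict "covers more" relation: a zero DestPort or zero Nexthdr wildcards that dimension, and equality is deliberately excluded. A standalone sketch of the truth table with local stand-in types (hypothetical, mirroring the logic above):

	package main

	import "fmt"

	type key struct {
		destPort uint16
		nexthdr  uint8
	}

	// portProtoIsBroader mirrors Key.PortProtoIsBroader: k is broader than c
	// when k wildcards a port or protocol that c pins down.
	func portProtoIsBroader(k, c key) bool {
		return k.destPort == 0 && c.destPort != 0 ||
			k.nexthdr == 0 && c.nexthdr != 0
	}

	func main() {
		anyPortTCP := key{destPort: 0, nexthdr: 6}
		port80TCP := key{destPort: 80, nexthdr: 6}

		fmt.Println(portProtoIsBroader(anyPortTCP, port80TCP)) // true: port wildcard covers 80
		fmt.Println(portProtoIsBroader(port80TCP, anyPortTCP)) // false
		fmt.Println(portProtoIsBroader(port80TCP, port80TCP))  // false: equal, not broader
	}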
+// NewMapStateEntry creates a map state entry. If redirect is true, the
+// caller is expected to replace the ProxyPort field before it is added to
+// the actual BPF map.
+// 'cs' is used to keep track of which policy selectors need this entry. If it is 'nil' this entry
+// will become sticky and cannot be completely removed via incremental updates. Even in this case
+// the entry may be overridden or removed by a deny entry.
+func NewMapStateEntry(cs MapStateOwner, derivedFrom labels.LabelArrayList, redirect, deny bool, hasAuth HasAuthType, authType AuthType) MapStateEntry {
+	var proxyPort uint16
+	if redirect {
+		// Any non-zero value will do, as the callers replace this with the
+		// actual proxy listening port number before the entry is added to the
+		// actual bpf map.
+		proxyPort = 1
+	}
+
+	return MapStateEntry{
+		ProxyPort:        proxyPort,
+		DerivedFromRules: derivedFrom,
+		IsDeny:           deny,
+		hasAuthType:      hasAuth,
+		AuthType:         authType,
+		owners:           map[MapStateOwner]struct{}{cs: {}},
+	}
+}
+
+// AddDependent adds 'key' to the set of dependent keys.
+func (e *MapStateEntry) AddDependent(key Key) {
+	if e.dependents == nil {
+		e.dependents = make(Keys, 1)
+	}
+	e.dependents[key] = struct{}{}
+}
+
+// RemoveDependent removes 'key' from the set of dependent keys.
+func (e *MapStateEntry) RemoveDependent(key Key) {
+	delete(e.dependents, key)
+	// Nil the map when empty. This is mainly to make unit testing easier.
+	if len(e.dependents) == 0 {
+		e.dependents = nil
+	}
+}
+
+// HasDependent returns true if the 'key' is contained
+// within the set of dependent keys
+func (e *MapStateEntry) HasDependent(key Key) bool {
+	if e.dependents == nil {
+		return false
+	}
+	_, ok := e.dependents[key]
+	return ok
+}
+
+var worldNets = map[identity.NumericIdentity][]*net.IPNet{
+	identity.ReservedIdentityWorld: {
+		{IP: net.IPv4zero, Mask: net.CIDRMask(0, net.IPv4len*8)},
+		{IP: net.IPv6zero, Mask: net.CIDRMask(0, net.IPv6len*8)},
+	},
+	identity.ReservedIdentityWorldIPv4: {
+		{IP: net.IPv4zero, Mask: net.CIDRMask(0, net.IPv4len*8)},
+	},
+	identity.ReservedIdentityWorldIPv6: {
+		{IP: net.IPv6zero, Mask: net.CIDRMask(0, net.IPv6len*8)},
+	},
+}
+
+// getNets returns the most specific CIDR for an identity. For the "World" identity
+// it returns both IPv4 and IPv6.
+func getNets(identities Identities, ident uint32) []*net.IPNet {
+	// World identities are handled explicitly for two reasons:
+	// 1. 'identities' may be nil, but world identities are still expected to be considered
+	// 2. SelectorCache is not informed of reserved/world identities in all test cases
+	id := identity.NumericIdentity(ident)
+	if id <= identity.ReservedIdentityWorldIPv6 {
+		return worldNets[id]
+	}
+	// CIDR identities have a local scope, so we can skip the rest if id is not of local scope.
+	if !id.HasLocalScope() || identities == nil {
+		return nil
+	}
+	return identities.GetNetsLocked(id)
+}
+
+// NewMapState creates a new MapState interface
+func NewMapState(initMap map[Key]MapStateEntry) MapState {
+	return newMapState(initMap)
+}
+
+func newMapState(initMap map[Key]MapStateEntry) *mapState {
+	m := &mapState{
+		allows: make(map[Key]MapStateEntry),
+		denies: make(map[Key]MapStateEntry),
+	}
+	for k, v := range initMap {
+		m.Insert(k, v)
+	}
+	return m
+}
+
+// Get the MapStateEntry that matches the Key.
+func (ms *mapState) Get(k Key) (MapStateEntry, bool) {
+	v, ok := ms.denies[k]
+	if ok {
+		return v, ok
+	}
+	v, ok = ms.allows[k]
+	return v, ok
+}
+
+// Insert the Key and matching MapStateEntry into the
+// MapState
+func (ms *mapState) Insert(k Key, v MapStateEntry) {
+	if v.IsDeny {
+		delete(ms.allows, k)
+		ms.denies.insert(k, v)
+	} else {
+		delete(ms.denies, k)
+		ms.allows.insert(k, v)
+	}
+}
+
+// Delete removes the Key and related MapStateEntry.
+func (ms *mapState) Delete(k Key) {
+	delete(ms.allows, k)
+	delete(ms.denies, k)
+}
+
+// ForEach iterates over every Key MapStateEntry and stops when the function
+// argument returns false. It returns false iff the iteration was cut short.
+func (ms *mapState) ForEach(f func(Key, MapStateEntry) (cont bool)) (complete bool) { + if complete := ms.ForEachAllow(f); !complete { + return complete + } + + return ms.ForEachDeny(f) +} + +// ForEachAllow iterates over every Key MapStateEntry that isn't a deny and +// stops when the function argument returns false +func (ms *mapState) ForEachAllow(f func(Key, MapStateEntry) (cont bool)) (complete bool) { + for k, v := range ms.allows { + if !f(k, v) { + return false + } + } + return true +} + +// ForEachDeny iterates over every Key MapStateEntry that is a deny and +// stops when the function argument returns false +func (ms *mapState) ForEachDeny(f func(Key, MapStateEntry) (cont bool)) (complete bool) { + for k, v := range ms.denies { + if !f(k, v) { + return false + } + } + return true +} + +// Len returns the length of the map +func (ms *mapState) Len() int { + return len(ms.allows) + len(ms.denies) +} + +// Equals determines if this MapState is equal to the +// argument MapState +func (msA *mapState) Equals(msB MapState) bool { + if msA.Len() != msB.Len() { + return false + } + + return msA.ForEach(func(kA Key, vA MapStateEntry) bool { + if vB, ok := msB.Get(kA); ok { + return (&vB).DatapathEqual(&vA) + } + return false + }) +} + +// Diff returns the string of differences between 'obtained' and 'expected' prefixed with +// '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively. +// For use in debugging. +func (obtained *mapState) Diff(_ *testing.T, expected MapState) (res string) { + expected.ForEach(func(kE Key, vE MapStateEntry) bool { + if vO, ok := obtained.Get(kE); ok { + if !(&vO).DatapathEqual(&vE) { + res += "- " + kE.String() + ": " + vE.String() + "\n" + res += "+ " + kE.String() + ": " + vO.String() + "\n" + } + } else { + res += "- " + kE.String() + ": " + vE.String() + "\n" + } + return true + }) + obtained.ForEach(func(kE Key, vE MapStateEntry) bool { + if vO, ok := expected.Get(kE); !ok { + res += "+ " + kE.String() + ": " + vO.String() + "\n" + } + return true + }) + return res +} + +// AddDependent adds 'key' to the set of dependent keys. +func (ms *mapState) AddDependent(owner Key, dependent Key, changes ChangeState) { + if e, exists := ms.allows[owner]; exists { + ms.addDependentOnEntry(owner, e, dependent, changes) + } else if e, exists := ms.denies[owner]; exists { + ms.addDependentOnEntry(owner, e, dependent, changes) + } +} + +// addDependentOnEntry adds 'dependent' to the set of dependent keys of 'e'. +func (ms *mapState) addDependentOnEntry(owner Key, e MapStateEntry, dependent Key, changes ChangeState) { + if _, exists := e.dependents[dependent]; !exists { + if changes.Old != nil { + changes.Old[owner] = e + } + e.AddDependent(dependent) + ms.Insert(owner, e) + } +} + +// RemoveDependent removes 'key' from the list of dependent keys. +// This is called when a dependent entry is being deleted. +// If 'old' is not nil, then old value is added there before any modifications. 
+func (ms *mapState) RemoveDependent(owner Key, dependent Key, changes ChangeState) {
+	if e, exists := ms.allows[owner]; exists {
+		changes.insertOldIfNotExists(owner, e)
+		e.RemoveDependent(dependent)
+		delete(ms.denies, owner)
+		ms.allows.insert(owner, e)
+		return
+	}
+
+	if e, exists := ms.denies[owner]; exists {
+		changes.insertOldIfNotExists(owner, e)
+		e.RemoveDependent(dependent)
+		delete(ms.allows, owner)
+		ms.denies.insert(owner, e)
+	}
+}
+
+// Merge adds owners, dependents, and DerivedFromRules from a new 'entry' to an existing
+// entry 'e'. 'entry' is not modified.
+// IsDeny, ProxyPort, and AuthType are merged by giving precedence to deny over non-deny, proxy
+// redirection over no proxy redirection, and explicit auth type over default auth type.
+func (e *MapStateEntry) Merge(entry *MapStateEntry) {
+	// Deny is sticky
+	if !e.IsDeny {
+		e.IsDeny = entry.IsDeny
+	}
+
+	// Deny entries have no proxy redirection nor auth requirement
+	if e.IsDeny {
+		e.ProxyPort = 0
+		e.hasAuthType = DefaultAuthType
+		e.AuthType = AuthTypeDisabled
+	} else {
+		// Proxy port takes precedence, but may be updated
+		if entry.ProxyPort != 0 {
+			e.ProxyPort = entry.ProxyPort
+		}
+
+		// Explicit auth takes precedence over defaulted one.
+		if entry.hasAuthType == ExplicitAuthType {
+			if e.hasAuthType == ExplicitAuthType {
+				// Numerically higher AuthType takes precedence when both are explicitly defined
+				if entry.AuthType > e.AuthType {
+					e.AuthType = entry.AuthType
+				}
+			} else {
+				e.hasAuthType = ExplicitAuthType
+				e.AuthType = entry.AuthType
+			}
+		} else if e.hasAuthType == DefaultAuthType {
+			e.AuthType = entry.AuthType // new default takes precedence
+		}
+	}
+
+	if e.owners == nil && len(entry.owners) > 0 {
+		e.owners = make(map[MapStateOwner]struct{}, len(entry.owners))
+	}
+	for k, v := range entry.owners {
+		e.owners[k] = v
+	}
+
+	// merge dependents
+	for k := range entry.dependents {
+		e.AddDependent(k)
+	}
+
+	// merge DerivedFromRules
+	if len(entry.DerivedFromRules) > 0 {
+		e.DerivedFromRules.MergeSorted(entry.DerivedFromRules)
+	}
+}
+
+// IsRedirectEntry returns true if e contains a redirect
+func (e *MapStateEntry) IsRedirectEntry() bool {
+	return e.ProxyPort != 0
+}
+
+// DatapathEqual returns true if two entries are equal in the datapath's PoV,
+// i.e., IsDeny, ProxyPort and AuthType are the same for both entries.
+func (e *MapStateEntry) DatapathEqual(o *MapStateEntry) bool {
+	if e == nil || o == nil {
+		return e == o
+	}
+
+	return e.IsDeny == o.IsDeny && e.ProxyPort == o.ProxyPort && e.AuthType == o.AuthType
+}
+
+// DeepEqual is a manually generated deepequal function, deeply comparing the
+// receiver with other. 'o' must be non-nil.
+// Defined manually due to deepequal-gen not supporting interface types.
+// 'cachedNets' member is ignored in comparison, as it is a cached value and
+// makes no functional difference.
+func (e *MapStateEntry) DeepEqual(o *MapStateEntry) bool { + if !e.DatapathEqual(o) { + return false + } + + if !e.DerivedFromRules.DeepEqual(&o.DerivedFromRules) { + return false + } + + if len(e.owners) != len(o.owners) { + return false + } + for k := range o.owners { + if _, exists := e.owners[k]; !exists { + return false + } + } + + if len(e.dependents) != len(o.dependents) { + return false + } + for k := range o.dependents { + if _, exists := e.dependents[k]; !exists { + return false + } + } + + // ignoring cachedNets + + return true +} + +// String returns a string representation of the MapStateEntry +func (e MapStateEntry) String() string { + return "ProxyPort=" + strconv.FormatUint(uint64(e.ProxyPort), 10) + + ",IsDeny=" + strconv.FormatBool(e.IsDeny) + + ",AuthType=" + e.AuthType.String() + + ",DerivedFromRules=" + fmt.Sprintf("%v", e.DerivedFromRules) +} + +// denyPreferredInsert inserts a key and entry into the map by given preference +// to deny entries, and L3-only deny entries over L3-L4 allows. +// This form may be used when a full policy is computed and we are not yet interested +// in accumulating incremental changes. +// Caller may insert the same MapStateEntry multiple times for different Keys, but all from the same +// owner. +func (ms *mapState) denyPreferredInsert(newKey Key, newEntry MapStateEntry, identities Identities, features policyFeatures) { + // Enforce nil values from NewMapStateEntry + newEntry.dependents = nil + + ms.denyPreferredInsertWithChanges(newKey, newEntry, identities, features, ChangeState{}) +} + +// addKeyWithChanges adds a 'key' with value 'entry' to 'keys' keeping track of incremental changes in 'adds' and 'deletes', and any changed or removed old values in 'old', if not nil. +func (ms *mapState) addKeyWithChanges(key Key, entry MapStateEntry, changes ChangeState) { + // Keep all owners that need this entry so that it is deleted only if all the owners delete their contribution + var datapathEqual bool + oldEntry, exists := ms.Get(key) + if exists { + // Deny entry can only be overridden by another deny entry + if oldEntry.IsDeny && !entry.IsDeny { + return + } + + if entry.DeepEqual(&oldEntry) { + return // nothing to do + } + + // Save old value before any changes, if desired + if changes.Old != nil { + changes.insertOldIfNotExists(key, oldEntry) + } + + // Compare for datapath equalness before merging, as the old entry is updated in + // place! + datapathEqual = oldEntry.DatapathEqual(&entry) + oldEntry.Merge(&entry) + ms.Insert(key, oldEntry) + } else { + // Newly inserted entries must have their own containers, so that they + // remain separate when new owners/dependents are added to existing entries + entry.DerivedFromRules = slices.Clone(entry.DerivedFromRules) + entry.owners = maps.Clone(entry.owners) + entry.dependents = maps.Clone(entry.dependents) + ms.Insert(key, entry) + } + + // Record an incremental Add if desired and entry is new or changed + if changes.Adds != nil && (!exists || !datapathEqual) { + changes.Adds[key] = struct{}{} + // Key add overrides any previous delete of the same key + if changes.Deletes != nil { + delete(changes.Deletes, key) + } + } +} + +// deleteKeyWithChanges deletes a 'key' from 'keys' keeping track of incremental changes in 'adds' and 'deletes'. +// The key is unconditionally deleted if 'cs' is nil, otherwise only the contribution of this 'cs' is removed. 
+func (ms *mapState) deleteKeyWithChanges(key Key, owner MapStateOwner, changes ChangeState) {
+	if entry, exists := ms.Get(key); exists {
+		// Save old value before any changes, if desired
+		oldAdded := changes.insertOldIfNotExists(key, entry)
+		if owner != nil {
+			// remove the contribution of the given selector only
+			if _, exists = entry.owners[owner]; exists {
+				// Remove the contribution of this selector from the entry
+				delete(entry.owners, owner)
+				if ownerKey, ok := owner.(Key); ok {
+					ms.RemoveDependent(ownerKey, key, changes)
+				}
+				// key is not deleted if other owners still need it
+				if len(entry.owners) > 0 {
+					return
+				}
+			} else {
+				// 'owner' was not found, do not change anything
+				if oldAdded {
+					delete(changes.Old, key)
+				}
+				return
+			}
+		}
+
+		// Remove this key from all owners' dependents maps if no owner was given.
+		// Owner is nil when deleting more specific entries (e.g., L3/L4) when
+		// adding deny entries that cover them (e.g., L3-deny).
+		if owner == nil {
+			for owner := range entry.owners {
+				if owner != nil {
+					if ownerKey, ok := owner.(Key); ok {
+						ms.RemoveDependent(ownerKey, key, changes)
+					}
+				}
+			}
+		}
+
+		// Check if dependent entries need to be deleted as well
+		for k := range entry.dependents {
+			ms.deleteKeyWithChanges(k, key, changes)
+		}
+		if changes.Deletes != nil {
+			changes.Deletes[key] = struct{}{}
+			// Remove a potential previously added key
+			if changes.Adds != nil {
+				delete(changes.Adds, key)
+			}
+		}
+
+		delete(ms.allows, key)
+		delete(ms.denies, key)
+	}
+}
+
+// identityIsSupersetOf compares two entries and keys to see if the primary identity contains
+// the compared identity. This means that either the primary identity is 0 (i.e. it is a superset
+// of every other identity), or one of the subnets of the primary identity fully contains or is
+// equal to one of the subnets in the compared identity (note: this covers cases like "reserved:world").
+func identityIsSupersetOf(primaryIdentity, compareIdentity uint32, identities Identities) bool {
+	// If the identities are equal then neither is a superset (for the purposes of our business logic).
+	if primaryIdentity == compareIdentity {
+		return false
+	}
+
+	// Consider an identity that selects a broader CIDR as a superset of
+	// an identity that selects a narrower CIDR. For instance, an identity
+	// corresponding to 192.0.0.0/16 is a superset of the identity that
+	// corresponds to 192.0.2.3/32.
+	//
+	// The reasons we need to do this are surprisingly complex, taking into
+	// consideration design decisions around the handling of ToFQDNs policy
+	// and how L4PolicyMap/L4Filter structures cache the policies with
+	// respect to specific CIDRs. More specifically:
+	// - At the time of initial L4Filter creation, it is not known which
+	//   specific CIDRs (or corresponding identities) are selected by a
+	//   toFQDNs rule in the policy engine.
+	// - It is possible to have a CIDR deny rule that should deny peers
+	//   that are allowed by a ToFQDNs statement. The precedence rules in
+	//   the API for such policy conflicts define that the deny should take
+	//   precedence.
+	// - Consider a case where there is a deny rule for 192.0.0.0/16 with
+	//   an allow rule for cilium.io, and one of the IP addresses for
+	//   cilium.io is 192.0.2.3.
+	// - If the IP for cilium.io was known at initial policy computation
+	//   time, then we would calculate the MapState from the L4Filters and
+	//   immediately determine that there is a conflict between the
+	//   L4Filter that denies 192.0.0.0/16 vs. the allow for 192.0.2.3.
+	//   From this we could immediately discard the "allow to 192.0.2.3"
+	//   policymap entry during policy calculation. This would satisfy the
+	//   API constraint that deny rules take precedence over allow rules.
+	//   However, this is not the case for ToFQDNs -- the IPs are not known
+	//   until DNS resolution time by the selected application / endpoint.
+	// - In order to make ToFQDNs policy implementation efficient, it uses
+	//   a shorter incremental policy computation path that attempts to
+	//   directly implement the ToFQDNs allow into a MapState entry without
+	//   reaching back up to the L4Filter layer to iterate all selectors
+	//   to determine traffic reachability for this newly learned IP.
+	// - As such, when the new ToFQDNs allow for the 192.0.2.3 IP address
+	//   is implemented, we must iterate back through all existing MapState
+	//   entries to determine whether any of the other map entries already
+	//   denies this traffic by virtue of the IP prefix being a superset of
+	//   this new allow. This allows us to ensure that the broader CIDR
+	//   deny semantics are correctly applied when there is a combination
+	//   of CIDR deny rules and ToFQDNs allow rules.
+	//
+	// An alternative to this approach might be to change the ToFQDNs
+	// policy calculation layer to reference back to the L4Filter layer,
+	// and perhaps introduce additional CIDR caching somewhere there so
+	// that this policy computation can be efficient while handling DNS
+	// responses. As of the writing of this message, there is no active
+	// proposal to implement this. As a result, any time there is an
+	// incremental policy update for a new map entry, we must iterate
+	// through all entries in the map and re-evaluate superset
+	// relationships for deny entries to ensure that policy precedence is
+	// correctly implemented between the new and old entries, taking into
+	// account whether the identities may represent CIDRs that have a
+	// superset relationship.
+	return primaryIdentity == 0 && compareIdentity != 0 ||
+		ip.NetsContainsAny(getNets(identities, primaryIdentity),
+			getNets(identities, compareIdentity))
+}
+
+// protocolsMatch checks to see if two given keys match on protocol.
+// This means that either one of them covers all protocols or they
+// are equal.
+func protocolsMatch(a, b Key) bool {
+	return a.Nexthdr == 0 || b.Nexthdr == 0 || a.Nexthdr == b.Nexthdr
+}
+
+// RevertChanges undoes changes to 'keys' as indicated by 'changes.adds' and 'changes.old' collected via
+// denyPreferredInsertWithChanges().
+func (ms *mapState) RevertChanges(changes ChangeState) {
+	for k := range changes.Adds {
+		delete(ms.allows, k)
+		delete(ms.denies, k)
+	}
+	// 'old' contains all the original values of both modified and deleted entries
+	for k, v := range changes.Old {
+		ms.Insert(k, v)
+	}
+}
+
+// denyPreferredInsertWithChanges contains the most important business logic for policy insertions. It inserts
+// a key and entry into the map by giving preference to deny entries, and to L3-only deny entries over L3-L4 allows.
+// Incremental changes performed are recorded in 'adds' and 'deletes', if not nil.
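+// As a hypothetical example: inserting an L3-only deny for identity X while an
+// allow for X on 80/TCP exists deletes the narrower allow and inserts the deny,
+// recording both the deletion and the addition in 'changes'.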
+// See https://docs.google.com/spreadsheets/d/1WANIoZGB48nryylQjjOw6lKjI80eVgPShrdMTMalLEw#gid=2109052536 for details
+func (ms *mapState) denyPreferredInsertWithChanges(newKey Key, newEntry MapStateEntry, identities Identities, features policyFeatures, changes ChangeState) {
+	// Skip deny rules processing if the policy in this direction has no deny rules
+	if !features.contains(denyRules) {
+		ms.authPreferredInsert(newKey, newEntry, features, changes)
+		return
+	}
+
+	allCpy := allKey
+	allCpy.TrafficDirection = newKey.TrafficDirection
+	// If we have a deny "all" we don't accept any kind of map entry.
+	if _, ok := ms.denies[allCpy]; ok {
+		return
+	}
+	if newEntry.IsDeny {
+		ms.ForEachAllow(func(k Key, v MapStateEntry) bool {
+			// Protocols and traffic directions that don't match ensure that the policies
+			// do not interact in any way.
+			if newKey.TrafficDirection != k.TrafficDirection || !protocolsMatch(newKey, k) {
+				return true
+			}
+
+			if identityIsSupersetOf(k.Identity, newKey.Identity, identities) {
+				if newKey.PortProtoIsBroader(k) {
+					// If this iterated-allow-entry is a superset of the new-entry
+					// and it has a more specific port-protocol than the new-entry
+					// then an additional copy of the new-entry with the more
+					// specific port-protocol of the iterated-allow-entry must be inserted.
+					newKeyCpy := newKey
+					newKeyCpy.DestPort = k.DestPort
+					newKeyCpy.Nexthdr = k.Nexthdr
+					l3l4DenyEntry := NewMapStateEntry(newKey, newEntry.DerivedFromRules, false, true, DefaultAuthType, AuthTypeDisabled)
+					ms.addKeyWithChanges(newKeyCpy, l3l4DenyEntry, changes)
+					// L3-only entries can be deleted incrementally so we need to track their
+					// effects on other entries so that those effects can be reverted when the
+					// identity is removed.
+					newEntry.AddDependent(newKeyCpy)
+				}
+			} else if (newKey.Identity == k.Identity ||
+				identityIsSupersetOf(newKey.Identity, k.Identity, identities)) &&
+				(newKey.PortProtoIsBroader(k) || newKey.PortProtoIsEqual(k)) {
+				// If the new-entry is a superset (or equal) of the iterated-allow-entry and
+				// the new-entry has a broader (or equal) port-protocol then we
+				// should delete the iterated-allow-entry
+				ms.deleteKeyWithChanges(k, nil, changes)
+			}
+			return true
+		})
+
+		bailed := false
+		ms.ForEachDeny(func(k Key, v MapStateEntry) bool {
+			// Protocols and traffic directions that don't match ensure that the policies
+			// do not interact in any way.
+			if newKey.TrafficDirection != k.TrafficDirection || !protocolsMatch(newKey, k) {
+				return true
+			}
+
+			if (newKey.Identity == k.Identity ||
+				identityIsSupersetOf(k.Identity, newKey.Identity, identities)) &&
+				k.DestPort == 0 && k.Nexthdr == 0 &&
+				!v.HasDependent(newKey) {
+				// If this iterated-deny-entry is a superset (or equal) of the new-entry and
+				// the iterated-deny-entry is an L3-only policy then we
+				// should not insert the new entry (as long as it is not one
+				// of the special L4-only denies we created to cover the special
+				// case of a superset-allow with a more specific port-protocol).
+				//
+				// NOTE: This condition could be broader to reject more deny entries,
+				// but there *may* be performance tradeoffs.
+				bailed = true
+				return false
+			} else if (newKey.Identity == k.Identity ||
+				identityIsSupersetOf(newKey.Identity, k.Identity, identities)) &&
+				newKey.DestPort == 0 && newKey.Nexthdr == 0 &&
+				!newEntry.HasDependent(k) {
+				// If this iterated-deny-entry is a subset (or equal) of the new-entry and
+				// the new-entry is an L3-only policy then we
+				// should delete the iterated-deny-entry (as long as it is not one
+				// of the special L4-only denies we created to cover the special
+				// case of a superset-allow with a more specific port-protocol).
+				//
+				// NOTE: This condition could be broader to reject more deny entries,
+				// but there *may* be performance tradeoffs.
+				ms.deleteKeyWithChanges(k, nil, changes)
+			}
+			return true
+		})
+
+		if !bailed {
+			ms.addKeyWithChanges(newKey, newEntry, changes)
+		}
+	} else {
+		// NOTE: We do not delete redundant allow entries.
+		bailed := false
+		ms.ForEachDeny(func(k Key, v MapStateEntry) bool {
+			// Protocols and traffic directions that don't match ensure that the policies
+			// do not interact in any way.
+			if newKey.TrafficDirection != k.TrafficDirection || !protocolsMatch(newKey, k) {
+				return true
+			}
+			if identityIsSupersetOf(newKey.Identity, k.Identity, identities) {
+				if k.PortProtoIsBroader(newKey) {
+					// If the new-entry is *only* a superset of the iterated-deny-entry
+					// and the new-entry has a more specific port-protocol than the
+					// iterated-deny-entry then an additional copy of the iterated-deny-entry
+					// with the more specific port-protocol of the new-entry must
+					// be added.
+					denyKeyCpy := k
+					denyKeyCpy.DestPort = newKey.DestPort
+					denyKeyCpy.Nexthdr = newKey.Nexthdr
+					l3l4DenyEntry := NewMapStateEntry(k, v.DerivedFromRules, false, true, DefaultAuthType, AuthTypeDisabled)
+					ms.addKeyWithChanges(denyKeyCpy, l3l4DenyEntry, changes)
+					// L3-only entries can be deleted incrementally so we need to track their
+					// effects on other entries so that those effects can be reverted when the
+					// identity is removed.
+					ms.addDependentOnEntry(k, v, denyKeyCpy, changes)
+				}
+			} else if (k.Identity == newKey.Identity ||
+				identityIsSupersetOf(k.Identity, newKey.Identity, identities)) &&
+				(k.PortProtoIsBroader(newKey) || k.PortProtoIsEqual(newKey)) &&
+				!v.HasDependent(newKey) {
+				// If the iterated-deny-entry is a superset (or equal) of the new-entry and has a
+				// broader (or equal) port-protocol than the new-entry then the new
+				// entry should not be inserted.
+				bailed = true
+				return false
+			}
+
+			return true
+		})
+
+		if !bailed {
+			ms.authPreferredInsert(newKey, newEntry, features, changes)
+		}
+	}
+}
+
+// IsSuperSetOf checks if the receiver Key is a superset of the argument Key, and returns a
+// specificity score of the receiver key (higher score is more specific), if so. Being a superset
+// means that the receiver key would match all the traffic of the argument key without being the
+// same key. Hence, an L3-only key is not a superset of an L4-only key, as the L3-only key would match
+// the traffic for the given L3 only, while the L4-only key matches traffic on the given port for
+// all the L3's.
+// Returns 0 if the receiver key is not a superset of the argument key.
+//
+// Specificity score for all possible superset wildcard patterns. Datapath requires proto to be specified if port is specified.
+// x. L3/proto/port
+// 1. */*/*
+// 2. */proto/*
+// 3. */proto/port
+// 4. ID/*/*
+// 5. ID/proto/*
+// ( ID/proto/port cannot be a superset of anything )
+func (k Key) IsSuperSetOf(other Key) int {
+	if k.TrafficDirection != other.TrafficDirection {
+		return 0 // TrafficDirection must match for 'k' to be a superset of 'other'
+	}
+	if k.Identity == 0 {
+		if other.Identity == 0 {
+			if k.Nexthdr == 0 { // k.DestPort == 0 is implied
+				if other.Nexthdr != 0 {
+					return 1 // */*/* is a superset of */proto/x
+				} // else both are */*/*
+			} else if k.Nexthdr == other.Nexthdr {
+				if k.DestPort == 0 && other.DestPort != 0 {
+					return 2 // */proto/* is a superset of */proto/port
+				} // else more specific or different ports
+			} // else more specific or different protocol
+		} else {
+			// Wildcard L3 is a superset of a specific L3 only if wildcard L3 is also wildcard L4, or the L4's match between the keys
+			if k.Nexthdr == 0 { // k.DestPort == 0 is implied
+				return 1 // */*/* is a superset of ID/x/x
+			} else if k.Nexthdr == other.Nexthdr {
+				if k.DestPort == 0 {
+					return 2 // */proto/* is a superset of ID/proto/x
+				} else if k.DestPort == other.DestPort {
+					return 3 // */proto/port is a superset of ID/proto/port
+				} // else more specific or different ports
+			} // else more specific or different protocol
+		}
+	} else if k.Identity == other.Identity {
+		if k.Nexthdr == 0 {
+			if other.Nexthdr != 0 {
+				return 4 // ID/*/* is a superset of ID/proto/x
+			} // else both are ID/*/*
+		} else if k.Nexthdr == other.Nexthdr {
+			if k.DestPort == 0 && other.DestPort != 0 {
+				return 5 // ID/proto/* is a superset of ID/proto/port
+			} // else more specific or different ports
+		} // else more specific or different protocol
+	} // else more specific or different identity
+	return 0
+}
+
+// authPreferredInsert applies AuthType of a more generic entry to more specific entries, if not
+// explicitly specified.
+//
+// This function is expected to be called for a map insertion after deny
+// entry evaluation. If there is a map entry that is a superset of 'newKey'
+// which denies traffic matching 'newKey', then this function should not be called.
+func (ms *mapState) authPreferredInsert(newKey Key, newEntry MapStateEntry, features policyFeatures, changes ChangeState) {
+	if features.contains(authRules) {
+		if newEntry.hasAuthType == DefaultAuthType {
+			// New entry has a default auth type.
+			// Fill in the AuthType from more generic entries with an explicit auth type
+			maxSpecificity := 0
+			l3l4State := newMapState(nil)
+
+			ms.ForEachAllow(func(k Key, v MapStateEntry) bool {
+				// Only consider the same Traffic direction
+				if newKey.TrafficDirection != k.TrafficDirection {
+					return true
+				}
+
+				// Nothing to be done if entry has default AuthType
+				if v.hasAuthType == DefaultAuthType {
+					return true
+				}
+
+				// Find out if 'k' is an identity-port-proto superset of 'newKey'
+				if specificity := k.IsSuperSetOf(newKey); specificity > 0 {
+					if specificity > maxSpecificity {
+						// AuthType from the most specific superset is
+						// applied to 'newEntry'
+						newEntry.AuthType = v.AuthType
+						maxSpecificity = specificity
+					}
+				} else {
+					// Check if a new L3L4 entry must be created due to L3-only
+					// 'k' specifying an explicit AuthType and an L4-only 'newKey' not
+					// having an explicit AuthType. In this case AuthType should
+					// only override the AuthType for the L3 & L4 combination,
+					// not L4 in general.
+					//
+					// These need to be collected and only added if there is a
+					// superset key of newKey with an explicit auth type. In
+					// this case AuthType of the new L4-only entry was
+					// overridden by a more generic entry and 'maxSpecificity >
+					// 0' after the loop.
+					if k.Identity != 0 && k.Nexthdr == 0 && newKey.Identity == 0 && newKey.Nexthdr != 0 {
+						newKeyCpy := k
+						newKeyCpy.DestPort = newKey.DestPort
+						newKeyCpy.Nexthdr = newKey.Nexthdr
+						l3l4AuthEntry := NewMapStateEntry(k, v.DerivedFromRules, false, false, DefaultAuthType, v.AuthType)
+						l3l4AuthEntry.DerivedFromRules.MergeSorted(newEntry.DerivedFromRules)
+						l3l4State.allows[newKeyCpy] = l3l4AuthEntry
+					}
+				}
+				return true
+			})
+			// Add collected L3/L4 entries if the auth type of the new entry was not
+			// overridden by a more generic entry. If it was overridden, the new L3L4
+			// entries are not needed as the L4-only entry with an overridden AuthType
+			// will be matched before the L3-only entries in the datapath.
+			if maxSpecificity == 0 {
+				l3l4State.ForEach(func(k Key, v MapStateEntry) bool {
+					ms.addKeyWithChanges(k, v, changes)
+					// L3-only entries can be deleted incrementally so we need to track their
+					// effects on other entries so that those effects can be reverted when the
+					// identity is removed.
+					newEntry.AddDependent(k)
+					return true
+				})
+			}
+		} else {
+			// New entry has an explicit auth type.
+			// Check if the new entry is the most specific superset of any other entry
+			// with the default auth type, and propagate the auth type from the new
+			// entry to such entries.
+			explicitSubsetKeys := make(Keys)
+			defaultSubsetKeys := make(map[Key]int)
+
+			ms.ForEachAllow(func(k Key, v MapStateEntry) bool {
+				// Only consider the same Traffic direction
+				if newKey.TrafficDirection != k.TrafficDirection {
+					return true
+				}
+
+				// Find out if 'newKey' is a superset of 'k'
+				if specificity := newKey.IsSuperSetOf(k); specificity > 0 {
+					if v.hasAuthType == ExplicitAuthType {
+						// store for later comparison
+						explicitSubsetKeys[k] = struct{}{}
+					} else {
+						defaultSubsetKeys[k] = specificity
+					}
+				} else if v.hasAuthType == DefaultAuthType {
+					// Check if a new L3L4 entry must be created due to L3-only
+					// 'newKey' with an explicit AuthType and an L4-only 'k' not
+					// having an explicit AuthType. In this case AuthType should
+					// only override the AuthType for the L3 & L4 combination,
+					// not L4 in general.
+					if newKey.Identity != 0 && newKey.Nexthdr == 0 && k.Identity == 0 && k.Nexthdr != 0 {
+						newKeyCpy := newKey
+						newKeyCpy.DestPort = k.DestPort
+						newKeyCpy.Nexthdr = k.Nexthdr
+						l3l4AuthEntry := NewMapStateEntry(newKey, newEntry.DerivedFromRules, false, false, DefaultAuthType, newEntry.AuthType)
+						l3l4AuthEntry.DerivedFromRules.MergeSorted(v.DerivedFromRules)
+						ms.addKeyWithChanges(newKeyCpy, l3l4AuthEntry, changes)
+						// L3-only entries can be deleted incrementally so we need to track their
+						// effects on other entries so that those effects can be reverted when the
+						// identity is removed.
+						newEntry.AddDependent(newKeyCpy)
+					}
+				}
+
+				return true
+			})
+			// Find out if this newKey is the most specific superset for all the subset keys with default auth type
+		Next:
+			for k, specificity := range defaultSubsetKeys {
+				for l := range explicitSubsetKeys {
+					if s := l.IsSuperSetOf(k); s > specificity {
+						// k has a more specific superset key than the newKey, skip
+						continue Next
+					}
+				}
+				// newKey is the most specific superset with an explicit auth type,
+				// propagate auth type from newEntry to the entry of k
+				v, _ := ms.Get(k)
+				v.AuthType = newEntry.AuthType
+				ms.addKeyWithChanges(k, v, changes) // Update the map value
+			}
+		}
+	}
+	ms.addKeyWithChanges(newKey, newEntry, changes)
+}
+
+var visibilityDerivedFromLabels = labels.LabelArray{
+	labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelVisibilityAnnotation, labels.LabelSourceReserved),
+}
+
+var visibilityDerivedFrom = labels.LabelArrayList{visibilityDerivedFromLabels}
+
+// insertOldIfNotExists only inserts `key=entry` into 'changes.Old' if `key` does not
+// exist there already; returns 'true' if `key=entry` was added.
+func (changes *ChangeState) insertOldIfNotExists(key Key, entry MapStateEntry) bool {
+	if changes == nil || changes.Old == nil {
+		return false
+	}
+	if _, exists := changes.Old[key]; !exists {
+		// Only insert the old entry if the entry was not first added on this round of
+		// changes.
+		if _, added := changes.Adds[key]; !added {
+			// new containers to keep this entry separate from the one that may remain in 'keys'
+			entry.DerivedFromRules = slices.Clone(entry.DerivedFromRules)
+			entry.owners = maps.Clone(entry.owners)
+			entry.dependents = maps.Clone(entry.dependents)
+
+			changes.Old[key] = entry
+			return true
+		}
+	}
+	return false
+}
+
+// AddVisibilityKeys adjusts and expands PolicyMapState keys
+// and values to redirect for visibility on the port of the visibility
+// annotation while still denying traffic on this port for identities
+// for which the traffic is denied.
+//
+// Datapath lookup order is, from highest to lowest precedence:
+// 1. L3/L4
+// 2. L4-only (wildcard L3)
+// 3. L3-only (wildcard L4)
+// 4. Allow-all
+//
+// This means that the L4-only allow visibility key can only be added if there is an
+// allow-all key, and all L3-only deny keys are expanded to L3/L4 keys. If no
+// L4-only key is added then also the L3-only allow keys need to be expanded to
+// L3/L4 keys for visibility redirection. In addition the existing L3/L4 and L4-only
+// allow keys need to be redirected to the proxy port, if not already redirected.
+//
+// The above can be accomplished by:
+//
+// 1. Change existing L4-only ALLOW key on matching port that does not already
+//    redirect to redirect.
+//    - e.g., 0:80=allow,0 -> 0:80=allow,<proxyPort>
+// 2. If allow-all policy exists, add L4-only visibility redirect key if the L4-only
+//    key does not already exist.
+//    - e.g., 0:0=allow,0 -> add 0:80=allow,<proxyPort> if 0:80 does not exist
+//    - this allows all traffic on port 80, but see step 5 below.
+// 3. Change all L3/L4 ALLOW keys on matching port that do not already redirect to
+//    redirect.
+//    - e.g, <ID1>:80=allow,0 -> <ID1>:80=allow,<proxyPort>
+// 4. For each L3-only ALLOW key add the corresponding L3/L4 ALLOW redirect if no
+//    L3/L4 key already exists and no L4-only key already exists and one is not added.
+//    - e.g., <ID2>:0=allow,0 -> add <ID2>:80=allow,<proxyPort> if <ID2>:80
+//      and 0:80 do not exist
+// 5. If a new L4-only key was added: For each L3-only DENY key add the
+//    corresponding L3/L4 DENY key if no L3/L4 key already exists.
+//    - e.g., <ID3>:0=deny,0 -> add <ID3>:80=deny,0 if <ID3>:80 does not exist
+//
+// With the above we only change/expand existing allow keys to redirect, and
+// expand existing drop keys to also drop on the port of interest, if a new
+// L4-only key allowing the port is added.
+//
+// 'adds' and 'oldValues' are updated with the changes made. 'adds' contains both the added and
+// changed keys. 'oldValues' contains the old values for changed keys. This function does not
+// delete any keys.
+func (ms *mapState) AddVisibilityKeys(e PolicyOwner, redirectPort uint16, visMeta *VisibilityMetadata, changes ChangeState) {
+	direction := trafficdirection.Egress
+	if visMeta.Ingress {
+		direction = trafficdirection.Ingress
+	}
+
+	allowAllKey := Key{
+		TrafficDirection: direction.Uint8(),
+	}
+	key := Key{
+		DestPort:         visMeta.Port,
+		Nexthdr:          uint8(visMeta.Proto),
+		TrafficDirection: direction.Uint8(),
+	}
+
+	entry := NewMapStateEntry(nil, visibilityDerivedFrom, true, false, DefaultAuthType, AuthTypeDisabled)
+	entry.ProxyPort = redirectPort
+
+	_, haveAllowAllKey := ms.Get(allowAllKey)
+	l4Only, haveL4OnlyKey := ms.Get(key)
+	addL4OnlyKey := false
+	if haveL4OnlyKey && !l4Only.IsDeny && l4Only.ProxyPort == 0 {
+		// 1. Change existing L4-only ALLOW key on matching port that does not already
+		//    redirect to redirect.
+		e.PolicyDebug(logrus.Fields{
+			logfields.BPFMapKey:   key,
+			logfields.BPFMapValue: entry,
+		}, "AddVisibilityKeys: Changing L4-only ALLOW key for visibility redirect")
+		ms.addKeyWithChanges(key, entry, changes)
+	}
+	if haveAllowAllKey && !haveL4OnlyKey {
+		// 2. If allow-all policy exists, add L4-only visibility redirect key if the L4-only
+		//    key does not already exist.
+		e.PolicyDebug(logrus.Fields{
+			logfields.BPFMapKey:   key,
+			logfields.BPFMapValue: entry,
+		}, "AddVisibilityKeys: Adding L4-only ALLOW key for visibility redirect")
+		addL4OnlyKey = true
+		ms.addKeyWithChanges(key, entry, changes)
+	}
+	//
+	// Loop through all L3 keys in the traffic direction of the new key
+	//
+	ms.ForEach(func(k Key, v MapStateEntry) bool {
+		if k.TrafficDirection != key.TrafficDirection || k.Identity == 0 {
+			return true
+		}
+		if k.DestPort == key.DestPort && k.Nexthdr == key.Nexthdr {
+			//
+			// Same L4
+			//
+			if !v.IsDeny && v.ProxyPort == 0 {
+				// 3. Change all L3/L4 ALLOW keys on matching port that do not
+				//    already redirect to redirect.
+				v.ProxyPort = redirectPort
+				v.DerivedFromRules = visibilityDerivedFrom
+				e.PolicyDebug(logrus.Fields{
+					logfields.BPFMapKey:   k,
+					logfields.BPFMapValue: v,
+				}, "AddVisibilityKeys: Changing L3/L4 ALLOW key for visibility redirect")
+				ms.addKeyWithChanges(k, v, changes)
+			}
+		} else if k.DestPort == 0 && k.Nexthdr == 0 {
+			//
+			// Wildcarded L4, i.e., L3-only
+			//
+			k2 := k
+			k2.DestPort = key.DestPort
+			k2.Nexthdr = key.Nexthdr
+			if !v.IsDeny && !haveL4OnlyKey && !addL4OnlyKey {
+				// 4. For each L3-only ALLOW key add the corresponding L3/L4
+				//    ALLOW redirect if no L3/L4 key already exists and no
+				//    L4-only key already exists and one is not added.
+ if _, ok := ms.Get(k2); !ok { + d2 := labels.LabelArrayList{visibilityDerivedFromLabels} + d2.MergeSorted(v.DerivedFromRules) + v2 := NewMapStateEntry(k, d2, true, false, v.hasAuthType, v.AuthType) + v2.ProxyPort = redirectPort + e.PolicyDebug(logrus.Fields{ + logfields.BPFMapKey: k2, + logfields.BPFMapValue: v2, + }, "AddVisibilityKeys: Extending L3-only ALLOW key to L3/L4 key for visibility redirect") + ms.addKeyWithChanges(k2, v2, changes) + + // Mark the new entry as a dependent of 'v' + ms.addDependentOnEntry(k, v, k2, changes) + } + } else if addL4OnlyKey && v.IsDeny { + // 5. If a new L4-only key was added: For each L3-only DENY + // key add the corresponding L3/L4 DENY key if no L3/L4 + // key already exists. + if _, ok := ms.Get(k2); !ok { + v2 := NewMapStateEntry(k, v.DerivedFromRules, false, true, DefaultAuthType, AuthTypeDisabled) + e.PolicyDebug(logrus.Fields{ + logfields.BPFMapKey: k2, + logfields.BPFMapValue: v2, + }, "AddVisibilityKeys: Extending L3-only DENY key to L3/L4 key to deny a port with visibility annotation") + ms.addKeyWithChanges(k2, v2, changes) + + // Mark the new entry as a dependent of 'v' + ms.addDependentOnEntry(k, v, k2, changes) + } + } + } + + return true + }) +} + +// determineAllowLocalhostIngress determines whether communication should be allowed +// from the localhost. It inserts the Key corresponding to the localhost in +// the desiredPolicyKeys if the localhost is allowed to communicate with the +// endpoint. Authentication for localhost traffic is not required. +func (ms *mapState) determineAllowLocalhostIngress() { + if option.Config.AlwaysAllowLocalhost() { + derivedFrom := labels.LabelArrayList{ + labels.LabelArray{ + labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowLocalHostIngress, labels.LabelSourceReserved), + }, + } + es := NewMapStateEntry(nil, derivedFrom, false, false, ExplicitAuthType, AuthTypeDisabled) // Authentication never required for local host ingress + ms.denyPreferredInsert(localHostKey, es, nil, allFeatures) + if !option.Config.EnableRemoteNodeIdentity { + var isHostDenied bool + v, ok := ms.Get(localHostKey) + isHostDenied = ok && v.IsDeny + derivedFrom := labels.LabelArrayList{ + labels.LabelArray{ + labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowRemoteHostIngress, labels.LabelSourceReserved), + }, + } + es := NewMapStateEntry(nil, derivedFrom, false, isHostDenied, ExplicitAuthType, AuthTypeDisabled) // Authentication never required for remote node ingress + ms.denyPreferredInsert(localRemoteNodeKey, es, nil, allFeatures) + } + } +} + +// allowAllIdentities translates all identities in selectorCache to their +// corresponding Keys in the specified direction (ingress, egress) which allows +// all at L3. +// Note that this is used when policy is not enforced, so authentication is explicitly not required. 
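+//
+// Concretely, the zero Key (Identity, DestPort and Nexthdr all 0) in a given
+// traffic direction acts as that direction's allow-all entry; for example,
+// allowAllIdentities(true, false) installs only the ingress allow-all key.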
+func (ms *mapState) allowAllIdentities(ingress, egress bool) {
+	if ingress {
+		keyToAdd := Key{
+			Identity:         0,
+			DestPort:         0,
+			Nexthdr:          0,
+			TrafficDirection: trafficdirection.Ingress.Uint8(),
+		}
+		derivedFrom := labels.LabelArrayList{
+			labels.LabelArray{
+				labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyIngress, labels.LabelSourceReserved),
+			},
+		}
+		ms.allows[keyToAdd] = NewMapStateEntry(nil, derivedFrom, false, false, ExplicitAuthType, AuthTypeDisabled)
+	}
+	if egress {
+		keyToAdd := Key{
+			Identity:         0,
+			DestPort:         0,
+			Nexthdr:          0,
+			TrafficDirection: trafficdirection.Egress.Uint8(),
+		}
+		derivedFrom := labels.LabelArrayList{
+			labels.LabelArray{
+				labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved),
+			},
+		}
+		ms.allows[keyToAdd] = NewMapStateEntry(nil, derivedFrom, false, false, ExplicitAuthType, AuthTypeDisabled)
+	}
+}
+
+func (ms *mapState) deniesL4(policyOwner PolicyOwner, l4 *L4Filter) bool {
+	port := uint16(l4.Port)
+	proto := uint8(l4.U8Proto)
+
+	// resolve named port
+	if port == 0 && l4.PortName != "" {
+		port = policyOwner.GetNamedPort(l4.Ingress, l4.PortName, proto)
+		if port == 0 {
+			return true
+		}
+	}
+
+	var dir uint8
+	if l4.Ingress {
+		dir = trafficdirection.Ingress.Uint8()
+	} else {
+		dir = trafficdirection.Egress.Uint8()
+	}
+	anyKey := Key{
+		Identity:         0,
+		DestPort:         0,
+		Nexthdr:          0,
+		TrafficDirection: dir,
+	}
+	// Are we explicitly denying all traffic?
+	v, ok := ms.Get(anyKey)
+	if ok && v.IsDeny {
+		return true
+	}
+
+	// Are we explicitly denying this L4-only traffic?
+	anyKey.DestPort = port
+	anyKey.Nexthdr = proto
+	v, ok = ms.Get(anyKey)
+	if ok && v.IsDeny {
+		return true
+	}
+
+	// The given L4 is not categorically denied.
+	// Traffic to/from a specific L3 on any of the selectors can still be denied.
+	return false
+}
+
+func (ms *mapState) GetIdentities(log *logrus.Logger) (ingIdentities, egIdentities []int64) {
+	return ms.getIdentities(log, false)
+}
+
+func (ms *mapState) GetDenyIdentities(log *logrus.Logger) (ingIdentities, egIdentities []int64) {
+	return ms.getIdentities(log, true)
+}
+
+// getIdentities returns the ingress and egress identities stored in the
+// MapState.
+func (ms *mapState) getIdentities(log *logrus.Logger, denied bool) (ingIdentities, egIdentities []int64) {
+	ms.ForEach(func(policyMapKey Key, policyMapValue MapStateEntry) bool {
+		if denied != policyMapValue.IsDeny {
+			return true
+		}
+		if policyMapKey.DestPort != 0 {
+			// If the port is non-zero, then the Key no longer only applies
+			// at L3. AllowedIngressIdentities and AllowedEgressIdentities
+			// contain sets of which identities (i.e., label-based L3 only)
+			// are allowed, so anything which contains L4-related policy should
+			// not be added to these sets.
+			return true
+		}
+		switch trafficdirection.TrafficDirection(policyMapKey.TrafficDirection) {
+		case trafficdirection.Ingress:
+			ingIdentities = append(ingIdentities, int64(policyMapKey.Identity))
+		case trafficdirection.Egress:
+			egIdentities = append(egIdentities, int64(policyMapKey.Identity))
+		default:
+			td := trafficdirection.TrafficDirection(policyMapKey.TrafficDirection)
+			log.WithField(logfields.TrafficDirection, td).
+				Errorf("Unexpected traffic direction present in policy map state for endpoint")
+		}
+		return true
+	})
+	return ingIdentities, egIdentities
+}
+
+// MapChanges collects updates to the endpoint policy on the
+// granularity of individual mapstate key-value pairs for both adds
+// and deletes. 'mutex' must be held for any access.
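+//
+// Typical flow (a sketch): AccumulateMapChanges() queues key/entry adds and
+// deletes as identities are allocated or released; consumeMapChanges() later
+// applies the queue to the endpoint's PolicyMapState and returns the
+// resulting incremental adds and deletes.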
+type MapChanges struct { + mutex lock.Mutex + changes []MapChange +} + +type MapChange struct { + Add bool // false deletes + Key Key + Value MapStateEntry +} + +// AccumulateMapChanges accumulates the given changes to the +// MapChanges. +// +// The caller is responsible for making sure the same identity is not +// present in both 'adds' and 'deletes'. +func (mc *MapChanges) AccumulateMapChanges(cs CachedSelector, adds, deletes []identity.NumericIdentity, key Key, value MapStateEntry) { + mc.mutex.Lock() + for _, id := range adds { + key.Identity = id.Uint32() + mc.changes = append(mc.changes, MapChange{Add: true, Key: key, Value: value}) + } + for _, id := range deletes { + key.Identity = id.Uint32() + mc.changes = append(mc.changes, MapChange{Add: false, Key: key, Value: value}) + } + mc.mutex.Unlock() +} + +// consumeMapChanges transfers the incremental changes from MapChanges to the caller, +// while applying the changes to PolicyMapState. +func (mc *MapChanges) consumeMapChanges(policyMapState MapState, features policyFeatures, identities Identities) (adds, deletes Keys) { + mc.mutex.Lock() + changes := ChangeState{ + Adds: make(Keys, len(mc.changes)), + Deletes: make(Keys, len(mc.changes)), + } + + for i := range mc.changes { + if mc.changes[i].Add { + // insert but do not allow non-redirect entries to overwrite a redirect entry, + // nor allow non-deny entries to overwrite deny entries. + // Collect the incremental changes to the overall state in 'mc.adds' and 'mc.deletes'. + policyMapState.denyPreferredInsertWithChanges(mc.changes[i].Key, mc.changes[i].Value, identities, features, changes) + } else { + // Delete the contribution of this cs to the key and collect incremental changes + for cs := range mc.changes[i].Value.owners { // get the sole selector + policyMapState.deleteKeyWithChanges(mc.changes[i].Key, cs, changes) + } + } + } + mc.changes = nil + mc.mutex.Unlock() + return changes.Adds, changes.Deletes +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/policy.go b/vendor/github.com/cilium/cilium/pkg/policy/policy.go new file mode 100644 index 0000000000..861530dd84 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/policy/policy.go @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package policy + +import ( + "io" + stdlog "log" + "strconv" + "strings" + + "github.com/cilium/cilium/api/v1/models" + "github.com/cilium/cilium/pkg/labels" + "github.com/cilium/cilium/pkg/policy/api" +) + +type Tracing int + +const ( + TRACE_DISABLED Tracing = iota + TRACE_ENABLED + TRACE_VERBOSE +) + +// TraceEnabled returns true if the SearchContext requests tracing. +func (s *SearchContext) TraceEnabled() bool { + return s.Trace != TRACE_DISABLED +} + +// PolicyTrace logs the given message into the SearchContext logger only if +// TRACE_ENABLED or TRACE_VERBOSE is enabled in the receiver's SearchContext. +func (s *SearchContext) PolicyTrace(format string, a ...interface{}) { + if s.TraceEnabled() { + log.Debugf(format, a...) + if s.Logging != nil { + format = "%-" + s.CallDepth() + "s" + format + a = append([]interface{}{""}, a...) + s.Logging.Printf(format, a...) + } + } +} + +// PolicyTraceVerbose logs the given message into the SearchContext logger only +// if TRACE_VERBOSE is enabled in the receiver's SearchContext. +func (s *SearchContext) PolicyTraceVerbose(format string, a ...interface{}) { + switch s.Trace { + case TRACE_VERBOSE: + log.Debugf(format, a...) 
+		if s.Logging != nil {
+			s.Logging.Printf(format, a...)
+		}
+	}
+}
+
+// SearchContext defines the context while evaluating policy
+type SearchContext struct {
+	Trace   Tracing
+	Depth   int
+	Logging *stdlog.Logger
+	From    labels.LabelArray
+	To      labels.LabelArray
+	DPorts  []*models.Port
+	// rulesSelect specifies whether or not to check whether a rule which is
+	// being analyzed using this SearchContext matches either From or To.
+	// This is used to avoid using EndpointSelector.Matches() if possible,
+	// since it is costly in terms of performance.
+	rulesSelect bool
+}
+
+func (s *SearchContext) String() string {
+	from := make([]string, 0, len(s.From))
+	to := make([]string, 0, len(s.To))
+	dports := make([]string, 0, len(s.DPorts))
+	for _, fromLabel := range s.From {
+		from = append(from, fromLabel.String())
+	}
+	for _, toLabel := range s.To {
+		to = append(to, toLabel.String())
+	}
+	// We should avoid using `fmt.Sprintf()` since
+	// it is well known for not being optimal in terms of
+	// CPU and memory allocations.
+	// See https://github.com/cilium/cilium/issues/19571
+	for _, dport := range s.DPorts {
+		dportStr := dport.Name
+		if dportStr == "" {
+			dportStr = strconv.FormatUint(uint64(dport.Port), 10)
+		}
+		dports = append(dports, dportStr+"/"+dport.Protocol)
+	}
+	fromStr := strings.Join(from, ", ")
+	toStr := strings.Join(to, ", ")
+	if len(dports) != 0 {
+		dportStr := strings.Join(dports, ", ")
+		return "From: [" + fromStr + "] => To: [" + toStr + "] Ports: [" + dportStr + "]"
+	}
+	return "From: [" + fromStr + "] => To: [" + toStr + "]"
+}
+
+func (s *SearchContext) CallDepth() string {
+	return strconv.Itoa(s.Depth * 2)
+}
+
+// WithLogger returns a shallow copy of the received SearchContext with the
+// logging set to write to 'log'.
+func (s *SearchContext) WithLogger(log io.Writer) *SearchContext {
+	result := *s
+	result.Logging = stdlog.New(log, "", 0)
+	if result.Trace == TRACE_DISABLED {
+		result.Trace = TRACE_ENABLED
+	}
+	return &result
+}
+
+// Translator is an interface for altering policy rules
+type Translator interface {
+	Translate(*api.Rule, *TranslationResult) error
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/proxyid.go b/vendor/github.com/cilium/cilium/pkg/policy/proxyid.go
new file mode 100644
index 0000000000..96029950a5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/proxyid.go
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/cilium/cilium/pkg/policy/trafficdirection"
+	"github.com/cilium/cilium/pkg/u8proto"
+)
+
+// ProxyID returns a unique string to identify a proxy mapping.
+func ProxyID(endpointID uint16, ingress bool, protocol string, port uint16) string {
+	direction := "egress"
+	if ingress {
+		direction = "ingress"
+	}
+	return strconv.FormatUint(uint64(endpointID), 10) + ":" + direction + ":" + protocol + ":" + strconv.FormatUint(uint64(port), 10)
+}
+
+// ProxyIDFromKey returns a unique string to identify a proxy mapping.
+func ProxyIDFromKey(endpointID uint16, key Key) string {
+	return ProxyID(endpointID, key.TrafficDirection == trafficdirection.Ingress.Uint8(), u8proto.U8proto(key.Nexthdr).String(), key.DestPort)
+}
+
+// ParseProxyID parses a proxy ID returned by ProxyID and returns its components.
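+// For example, ProxyID(42, true, "TCP", 80) yields "42:ingress:TCP:80", and
+// ParseProxyID round-trips it back to (42, true, "TCP", 80).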
+func ParseProxyID(proxyID string) (endpointID uint16, ingress bool, protocol string, port uint16, err error) {
+	comps := strings.Split(proxyID, ":")
+	if len(comps) != 4 {
+		err = fmt.Errorf("invalid proxy ID structure: %s", proxyID)
+		return
+	}
+	epID, err := strconv.ParseUint(comps[0], 10, 16)
+	if err != nil {
+		return
+	}
+	endpointID = uint16(epID)
+	ingress = comps[1] == "ingress"
+	protocol = comps[2]
+	l4port, err := strconv.ParseUint(comps[3], 10, 16)
+	if err != nil {
+		return
+	}
+	port = uint16(l4port)
+	return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/repository.go b/vendor/github.com/cilium/cilium/pkg/policy/repository.go
new file mode 100644
index 0000000000..52cde0e927
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/repository.go
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/netip"
+	"sync"
+	"sync/atomic"
+
+	cilium "github.com/cilium/proxy/go/cilium/api"
+
+	"github.com/cilium/cilium/api/v1/models"
+	"github.com/cilium/cilium/pkg/crypto/certificatemanager"
+	"github.com/cilium/cilium/pkg/eventqueue"
+	"github.com/cilium/cilium/pkg/identity"
+	"github.com/cilium/cilium/pkg/identity/cache"
+	k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
+	"github.com/cilium/cilium/pkg/labels"
+	"github.com/cilium/cilium/pkg/lock"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	"github.com/cilium/cilium/pkg/metrics"
+	"github.com/cilium/cilium/pkg/option"
+	"github.com/cilium/cilium/pkg/policy/api"
+)
+
+// PolicyContext is an interface policy resolution functions use to access the Repository.
+// This way testing code can run without mocking a full Repository.
+type PolicyContext interface {
+	// return the namespace in which the policy rule is being resolved
+	GetNamespace() string
+
+	// return the SelectorCache
+	GetSelectorCache() *SelectorCache
+
+	// GetTLSContext resolves the given 'api.TLSContext' into CA
+	// certs and the public and private keys, using secrets from
+	// k8s or from the local file system.
+	GetTLSContext(tls *api.TLSContext) (ca, public, private string, err error)
+
+	// GetEnvoyHTTPRules translates the given 'api.L7Rules' into
+	// the protobuf representation the Envoy can consume. The bool
+	// return parameter tells whether the rule enforcement can
+	// be short-circuited upon the first allowing rule. This is
+	// false if any of the rules has side-effects, requiring all
+	// such rules to be evaluated.
+	GetEnvoyHTTPRules(l7Rules *api.L7Rules) (*cilium.HttpNetworkPolicyRules, bool)
+
+	// IsDeny returns true if the policy computation should be done for the
+	// policy deny case. This function returns different values depending on the
+	// code path as it can be changed during the policy calculation.
+	IsDeny() bool
+
+	// SetDeny sets the Deny field of the PolicyContext and returns the old
+	// value stored.
+	SetDeny(newValue bool) (oldValue bool)
+}
+
+type policyContext struct {
+	repo *Repository
+	ns   string
+	// isDeny is set to true if the given policy computation should
+	// be done for the policy deny case.
+	isDeny bool
+}
+
+// GetNamespace() returns the namespace for the policy rule being resolved
+func (p *policyContext) GetNamespace() string {
+	return p.ns
+}
+
+// GetSelectorCache() returns the selector cache used by the Repository
+func (p *policyContext) GetSelectorCache() *SelectorCache {
+	return p.repo.GetSelectorCache()
+}
+
+// GetTLSContext() returns data for TLS Context via a CertificateManager
+func (p *policyContext) GetTLSContext(tls *api.TLSContext) (ca, public, private string, err error) {
+	if p.repo.certManager == nil {
+		return "", "", "", fmt.Errorf("No Certificate Manager set on Policy Repository")
+	}
+	return p.repo.certManager.GetTLSContext(context.TODO(), tls, p.ns)
+}
+
+func (p *policyContext) GetEnvoyHTTPRules(l7Rules *api.L7Rules) (*cilium.HttpNetworkPolicyRules, bool) {
+	return p.repo.GetEnvoyHTTPRules(l7Rules, p.ns)
+}
+
+// IsDeny returns true if the policy computation should be done for the
+// policy deny case. This function returns different values depending on the
+// code path as it can be changed during the policy calculation.
+func (p *policyContext) IsDeny() bool {
+	return p.isDeny
+}
+
+// SetDeny sets the Deny field of the PolicyContext and returns the old
+// value stored.
+func (p *policyContext) SetDeny(deny bool) bool {
+	oldDeny := p.isDeny
+	p.isDeny = deny
+	return oldDeny
+}
+
+// Repository is a list of policy rules which in combination form the security
+// policy.
+type Repository struct {
+	// Mutex protects the whole policy tree
+	Mutex lock.RWMutex
+	rules ruleSlice
+
+	// rulesIndexByK8sUID indexes the rules by k8s UID.
+	rulesIndexByK8sUID map[string]*rule
+
+	// revision is the revision of the policy repository. It will be
+	// incremented whenever the policy repository is changed.
+	// Always positive (>0).
+	revision atomic.Uint64
+
+	// RepositoryChangeQueue is a queue which serializes changes to the policy
+	// repository.
+	RepositoryChangeQueue *eventqueue.EventQueue
+
+	// RuleReactionQueue is a queue which serializes the resultant events that
+	// need to occur after updating the state of the policy repository. This
+	// can include queueing endpoint regenerations, policy revision increments
+	// for endpoints, etc.
+	RuleReactionQueue *eventqueue.EventQueue
+
+	// selectorCache tracks the selectors used in the policies
+	// resolved from the repository.
+	selectorCache *SelectorCache
+
+	// policyCache tracks the selector policies created from this repo
+	policyCache *PolicyCache
+
+	certManager   certificatemanager.CertificateManager
+	secretManager certificatemanager.SecretManager
+
+	getEnvoyHTTPRules func(certificatemanager.SecretManager, *api.L7Rules, string) (*cilium.HttpNetworkPolicyRules, bool)
+}
+
+// GetSelectorCache() returns the selector cache used by the Repository
+func (p *Repository) GetSelectorCache() *SelectorCache {
+	return p.selectorCache
+}
+
+// GetAuthTypes returns the AuthTypes required by the policy between the localID and remoteID
+func (p *Repository) GetAuthTypes(localID, remoteID identity.NumericIdentity) AuthTypes {
+	return p.policyCache.GetAuthTypes(localID, remoteID)
+}
+
+func (p *Repository) SetEnvoyRulesFunc(f func(certificatemanager.SecretManager, *api.L7Rules, string) (*cilium.HttpNetworkPolicyRules, bool)) {
+	p.getEnvoyHTTPRules = f
+}
+
+func (p *Repository) GetEnvoyHTTPRules(l7Rules *api.L7Rules, ns string) (*cilium.HttpNetworkPolicyRules, bool) {
+	if p.getEnvoyHTTPRules == nil {
+		return nil, true
+	}
+	return p.getEnvoyHTTPRules(p.secretManager, l7Rules, ns)
+}
+
+// GetPolicyCache() returns the policy cache used by the Repository
+func (p *Repository) GetPolicyCache() *PolicyCache {
+	return p.policyCache
+}
+
+// NewPolicyRepository creates a new policy repository.
+func NewPolicyRepository(
+	idAllocator cache.IdentityAllocator,
+	idCache cache.IdentityCache,
+	certManager certificatemanager.CertificateManager,
+	secretManager certificatemanager.SecretManager,
+) *Repository {
+	repo := NewStoppedPolicyRepository(idAllocator, idCache, certManager, secretManager)
+	repo.Start()
+	return repo
+}
+
+// NewStoppedPolicyRepository creates a new policy repository without starting
+// queues.
+//
+// Queues must be allocated via [Repository.Start]. The function serves to
+// satisfy hive invariants.
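+//
+// Usage sketch (mirrors NewPolicyRepository above):
+//
+//	repo := NewStoppedPolicyRepository(idAllocator, idCache, certManager, secretManager)
+//	repo.Start() // allocate and run the repository's event queues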
+func NewStoppedPolicyRepository(
+	idAllocator cache.IdentityAllocator,
+	idCache cache.IdentityCache,
+	certManager certificatemanager.CertificateManager,
+	secretManager certificatemanager.SecretManager,
+) *Repository {
+	selectorCache := NewSelectorCache(idAllocator, idCache)
+	repo := &Repository{
+		rulesIndexByK8sUID: map[string]*rule{},
+		selectorCache:      selectorCache,
+		certManager:        certManager,
+		secretManager:      secretManager,
+	}
+	repo.revision.Store(1)
+	repo.policyCache = NewPolicyCache(repo, true)
+	return repo
+}
+
+// traceState is an internal structure used to collect information
+// while determining policy decision
+type traceState struct {
+	// selectedRules is the number of rules with matching EndpointSelector
+	selectedRules int
+
+	// matchedRules is the number of rules that have allowed traffic
+	matchedRules int
+
+	// matchedDenyRules is the number of rules that have denied traffic
+	matchedDenyRules int
+
+	// constrainedRules counts how many "FromRequires" constraints are
+	// unsatisfied
+	constrainedRules int
+
+	// ruleID is the rule ID currently being evaluated
+	ruleID int
+}
+
+func (state *traceState) trace(rules int, ctx *SearchContext) {
+	ctx.PolicyTrace("%d/%d rules selected\n", state.selectedRules, rules)
+	if state.constrainedRules > 0 {
+		ctx.PolicyTrace("Found unsatisfied FromRequires constraint\n")
+	} else {
+		if state.matchedRules > 0 {
+			ctx.PolicyTrace("Found allow rule\n")
+		} else {
+			ctx.PolicyTrace("Found no allow rule\n")
+		}
+		if state.matchedDenyRules > 0 {
+			ctx.PolicyTrace("Found deny rule\n")
+		} else {
+			ctx.PolicyTrace("Found no deny rule\n")
+		}
+	}
+}
+
+// Start allocates and starts various queues used by the Repository.
+//
+// Must only be called if using [NewStoppedPolicyRepository]
+func (p *Repository) Start() {
+	p.RepositoryChangeQueue = eventqueue.NewEventQueueBuffered("repository-change-queue", option.Config.PolicyQueueSize)
+	p.RuleReactionQueue = eventqueue.NewEventQueueBuffered("repository-reaction-queue", option.Config.PolicyQueueSize)
+	p.RepositoryChangeQueue.Run()
+	p.RuleReactionQueue.Run()
+}
+
+// ResolveL4IngressPolicy resolves the L4 ingress policy for a set of endpoints
+// by searching the policy repository for `PortRule` rules that are attached to
+// a `Rule` where the EndpointSelector matches `ctx.To`. `ctx.From` has no effect and
+// is ignored in the search. If multiple `PortRule` rules are found, all rules
+// are merged together. If rules contain overlapping port definitions, the first
+// rule found in the repository takes precedence.
+//
+// TODO: Coalesce l7 rules?
+//
+// Caller must release resources by calling Detach() on the returned map!
+//
+// NOTE: This is only called from unit tests, but from multiple packages.
+func (p *Repository) ResolveL4IngressPolicy(ctx *SearchContext) (L4PolicyMap, error) {
+	policyCtx := policyContext{
+		repo: p,
+		ns:   ctx.To.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
+	}
+	result, err := p.rules.resolveL4IngressPolicy(&policyCtx, ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolveL4EgressPolicy resolves the L4 egress policy for a set of endpoints
+// by searching the policy repository for `PortRule` rules that are attached to
+// a `Rule` where the EndpointSelector matches `ctx.From`. `ctx.To` has no effect and
+// is ignored in the search. If multiple `PortRule` rules are found, all rules
+// are merged together.
+// If rules contain overlapping port definitions, the first
+// rule found in the repository takes precedence.
+//
+// Caller must release resources by calling Detach() on the returned map!
+//
+// NOTE: This is only called from unit tests, but from multiple packages.
+func (p *Repository) ResolveL4EgressPolicy(ctx *SearchContext) (L4PolicyMap, error) {
+	policyCtx := policyContext{
+		repo: p,
+		ns:   ctx.From.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
+	}
+	result, err := p.rules.resolveL4EgressPolicy(&policyCtx, ctx)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// AllowsIngressRLocked evaluates the policy repository for the provided search
+// context and returns the verdict for ingress. If no matching policy allows for
+// the connection, the request will be denied. The policy repository mutex must
+// be held.
+//
+// NOTE: This is only called from unit tests, but from multiple packages.
+func (p *Repository) AllowsIngressRLocked(ctx *SearchContext) api.Decision {
+	// Lack of DPorts in the SearchContext means L3-only search
+	if len(ctx.DPorts) == 0 {
+		newCtx := *ctx
+		newCtx.DPorts = []*models.Port{{
+			Port:     0,
+			Protocol: models.PortProtocolANY,
+		}}
+		ctx = &newCtx
+	}
+
+	ctx.PolicyTrace("Tracing %s", ctx.String())
+	ingressPolicy, err := p.ResolveL4IngressPolicy(ctx)
+	if err != nil {
+		log.WithError(err).Warn("Evaluation error while resolving L4 ingress policy")
+	}
+
+	verdict := api.Denied
+	if err == nil && len(ingressPolicy) > 0 {
+		verdict = ingressPolicy.IngressCoversContext(ctx)
+	}
+
+	ctx.PolicyTrace("Ingress verdict: %s", verdict.String())
+	ingressPolicy.Detach(p.GetSelectorCache())
+
+	return verdict
+}
+
+// AllowsEgressRLocked evaluates the policy repository for the provided search
+// context and returns the verdict. If no matching policy allows for the
+// connection, the request will be denied. The policy repository mutex must be
+// held.
+//
+// NOTE: This is only called from unit tests, but from multiple packages.
+func (p *Repository) AllowsEgressRLocked(ctx *SearchContext) api.Decision {
+	// Lack of DPorts in the SearchContext means L3-only search
+	if len(ctx.DPorts) == 0 {
+		newCtx := *ctx
+		newCtx.DPorts = []*models.Port{{
+			Port:     0,
+			Protocol: models.PortProtocolANY,
+		}}
+		ctx = &newCtx
+	}
+
+	ctx.PolicyTrace("Tracing %s\n", ctx.String())
+	egressPolicy, err := p.ResolveL4EgressPolicy(ctx)
+	if err != nil {
+		log.WithError(err).Warn("Evaluation error while resolving L4 egress policy")
+	}
+	verdict := api.Denied
+	if err == nil && len(egressPolicy) > 0 {
+		verdict = egressPolicy.EgressCoversContext(ctx)
+	}
+
+	ctx.PolicyTrace("Egress verdict: %s", verdict.String())
+	egressPolicy.Detach(p.GetSelectorCache())
+	return verdict
+}
+
+// SearchRLocked searches the policy repository for rules which match the
+// specified labels and returns an array of all rules which matched.
+func (p *Repository) SearchRLocked(lbls labels.LabelArray) api.Rules {
+	result := api.Rules{}
+
+	if uid := lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PolicyLabelUID); uid != "" {
+		r, ok := p.rulesIndexByK8sUID[uid]
+		if ok {
+			result = append(result, &r.Rule)
+		}
+		return result
+	}
+	for _, r := range p.rules {
+		if r.Labels.Contains(lbls) {
+			result = append(result, &r.Rule)
+		}
+	}
+
+	return result
+}
+
+// Add inserts a rule into the policy repository.
+// This is just a helper function for unit testing.
+// TODO: this should be in a test_helpers.go file or something similar +// so we can clearly delineate what helpers are for testing. +// NOTE: This is only called from unit tests, but from multiple packages. +func (p *Repository) Add(r api.Rule) (uint64, map[uint16]struct{}, error) { + p.Mutex.Lock() + defer p.Mutex.Unlock() + + if err := r.Sanitize(); err != nil { + return p.GetRevision(), nil, err + } + + newList := make([]*api.Rule, 1) + newList[0] = &r + _, rev := p.AddListLocked(newList) + return rev, map[uint16]struct{}{}, nil +} + +// AddListLocked inserts a rule into the policy repository with the repository already locked +// Expects that the entire rule list has already been sanitized. +func (p *Repository) AddListLocked(rules api.Rules) (ruleSlice, uint64) { + + newList := make(ruleSlice, len(rules)) + for i := range rules { + newRule := &rule{ + Rule: *rules[i], + metadata: newRuleMetadata(), + } + newList[i] = newRule + if uid := rules[i].Labels.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PolicyLabelUID); uid != "" { + p.rulesIndexByK8sUID[uid] = newRule + } + } + + p.rules = append(p.rules, newList...) + p.BumpRevision() + metrics.Policy.Add(float64(len(newList))) + return newList, p.GetRevision() +} + +// removeIdentityFromRuleCaches removes the identity from the selector cache +// in each rule in the repository. +// +// Returns a sync.WaitGroup that blocks until the policy operation is complete. +// The repository read lock must be held until the waitgroup is complete. +func (p *Repository) removeIdentityFromRuleCaches(identity *identity.Identity) *sync.WaitGroup { + var wg sync.WaitGroup + wg.Add(len(p.rules)) + for _, r := range p.rules { + go func(rr *rule, wgg *sync.WaitGroup) { + rr.metadata.delete(identity) + wgg.Done() + }(r, &wg) + } + return &wg +} + +// LocalEndpointIdentityAdded handles local identity add events. +func (p *Repository) LocalEndpointIdentityAdded(*identity.Identity) { + // no-op for now. +} + +// LocalEndpointIdentityRemoved handles local identity removal events to +// remove references from rules in the repository to the specified identity. +func (p *Repository) LocalEndpointIdentityRemoved(identity *identity.Identity) { + go func() { + scopedLog := log.WithField(logfields.Identity, identity) + scopedLog.Debug("Removing identity references from policy cache") + p.Mutex.RLock() + wg := p.removeIdentityFromRuleCaches(identity) + wg.Wait() + p.Mutex.RUnlock() + scopedLog.Debug("Finished cleaning policy cache") + }() +} + +// AddList inserts a rule into the policy repository. It is used for +// unit-testing purposes only. +func (p *Repository) AddList(rules api.Rules) (ruleSlice, uint64) { + p.Mutex.Lock() + defer p.Mutex.Unlock() + return p.AddListLocked(rules) +} + +// Iterate iterates the policy repository, calling f for each rule. It is safe +// to execute Iterate concurrently. +func (p *Repository) Iterate(f func(rule *api.Rule)) { + p.Mutex.RWMutex.Lock() + defer p.Mutex.RWMutex.Unlock() + for _, r := range p.rules { + f(&r.Rule) + } +} + +// UpdateRulesEndpointsCaches updates the caches within each rule in r that +// specify whether the rule selects the endpoints in eps. If any rule matches +// the endpoints, it is added to the provided IDSet, and removed from the +// provided EndpointSet. The provided WaitGroup is signaled for a given endpoint +// when it is finished being processed. 
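+//
+// In short (a restatement of the behavior below): endpoints selected by any
+// rule move from endpointsToBumpRevision to endpointsToRegenerate, and
+// endpoints whose rules could not be evaluated are removed from both sets.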
+func (r ruleSlice) UpdateRulesEndpointsCaches(endpointsToBumpRevision, endpointsToRegenerate *EndpointSet, policySelectionWG *sync.WaitGroup) {
+	endpointsToBumpRevision.ForEachGo(policySelectionWG, func(epp Endpoint) {
+		endpointSelected, err := r.updateEndpointsCaches(epp)
+		if endpointSelected {
+			endpointsToRegenerate.Insert(epp)
+		}
+		// If we could not evaluate the rules against the current endpoint, or
+		// the endpoint is selected by the rules, remove it from the set of
+		// endpoints to bump the revision. If the error is non-nil, the
+		// endpoint is no longer in either set (endpointsToBumpRevision or
+		// endpointsToRegenerate, as we could not determine what to do for the
+		// endpoint). This is usually the case when the endpoint is no longer
+		// alive (i.e., it has been marked to be deleted).
+		if endpointSelected || err != nil {
+			if err != nil {
+				log.WithError(err).Debug("could not determine whether endpoint was selected by rule")
+			}
+			endpointsToBumpRevision.Delete(epp)
+		}
+	})
+}
+
+// DeleteByLabelsLocked deletes all rules in the policy repository which
+// contain the specified labels. Returns the revision of the policy repository
+// after deleting the rules, as well as how many rules were deleted.
+func (p *Repository) DeleteByLabelsLocked(lbls labels.LabelArray) (ruleSlice, uint64, int) {
+
+	deleted := 0
+	new := p.rules[:0]
+	deletedRules := ruleSlice{}
+
+	for _, r := range p.rules {
+		if !r.Labels.Contains(lbls) {
+			new = append(new, r)
+		} else {
+			deletedRules = append(deletedRules, r)
+			deleted++
+		}
+	}
+
+	if deleted > 0 {
+		p.BumpRevision()
+		p.rules = new
+		if uid := lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PolicyLabelUID); uid != "" {
+			delete(p.rulesIndexByK8sUID, uid)
+		}
+		metrics.Policy.Sub(float64(deleted))
+	}
+
+	return deletedRules, p.GetRevision(), deleted
+}
+
+// DeleteByLabels deletes all rules in the policy repository which contain the
+// specified labels.
+func (p *Repository) DeleteByLabels(lbls labels.LabelArray) (uint64, int) {
+	p.Mutex.Lock()
+	defer p.Mutex.Unlock()
+	_, rev, numDeleted := p.DeleteByLabelsLocked(lbls)
+	return rev, numDeleted
+}
+
+// JSONMarshalRules returns a slice of policy rules as a string in JSON
+// representation.
+func JSONMarshalRules(rules api.Rules) string {
+	b, err := json.MarshalIndent(rules, "", " ")
+	if err != nil {
+		return err.Error()
+	}
+	return string(b)
+}
+
+// GetJSON returns all rules of the policy repository as a string in JSON
+// representation.
+func (p *Repository) GetJSON() string {
+	p.Mutex.RLock()
+	defer p.Mutex.RUnlock()
+
+	result := api.Rules{}
+	for _, r := range p.rules {
+		result = append(result, &r.Rule)
+	}
+
+	return JSONMarshalRules(result)
+}
+
+// GetRulesMatching returns whether any of the rules in a repository contain a
+// rule with labels matching the labels in the provided LabelArray.
+//
+// Must be called with p.Mutex held
+func (p *Repository) GetRulesMatching(lbls labels.LabelArray) (ingressMatch bool, egressMatch bool) {
+	ingressMatch = false
+	egressMatch = false
+	for _, r := range p.rules {
+		rulesMatch := r.getSelector().Matches(lbls)
+		if rulesMatch {
+			if len(r.Ingress) > 0 {
+				ingressMatch = true
+			}
+			if len(r.IngressDeny) > 0 {
+				ingressMatch = true
+			}
+			if len(r.Egress) > 0 {
+				egressMatch = true
+			}
+			if len(r.EgressDeny) > 0 {
+				egressMatch = true
+			}
+		}
+
+		if ingressMatch && egressMatch {
+			return
+		}
+	}
+	return
+}
+
+// getMatchingRules returns whether any of the rules in a repository contain a
+// rule with labels matching the given security identity, as well as
+// a slice of all rules which match.
+//
+// Must be called with p.Mutex held
+func (p *Repository) getMatchingRules(securityIdentity *identity.Identity) (
+	ingressMatch, egressMatch bool,
+	matchingRules ruleSlice) {
+
+	matchingRules = []*rule{}
+	for _, r := range p.rules {
+		isNode := securityIdentity.ID == identity.ReservedIdentityHost
+		selectsNode := r.NodeSelector.LabelSelector != nil
+		if selectsNode != isNode {
+			continue
+		}
+		if ruleMatches := r.matches(securityIdentity); ruleMatches {
+			// Don't need to update whether ingressMatch is true if it already
+			// has been determined to be true - allows us to avoid having to
+			// check the length of the slice.
+			if !ingressMatch {
+				ingressMatch = len(r.Ingress) > 0 || len(r.IngressDeny) > 0
+			}
+			if !egressMatch {
+				egressMatch = len(r.Egress) > 0 || len(r.EgressDeny) > 0
+			}
+			matchingRules = append(matchingRules, r)
+		}
+	}
+	return
+}
+
+// NumRules returns the number of rules in the policy repository.
+//
+// Must be called with p.Mutex held
+func (p *Repository) NumRules() int {
+	return len(p.rules)
+}
+
+// GetRevision returns the revision of the policy repository
+func (p *Repository) GetRevision() uint64 {
+	return p.revision.Load()
+}
+
+// Empty returns 'true' if the repository has no rules, 'false' otherwise.
+//
+// Must be called without p.Mutex held
+func (p *Repository) Empty() bool {
+	p.Mutex.Lock()
+	defer p.Mutex.Unlock()
+	return p.NumRules() == 0
+}
+
+// TranslationResult contains the results of the rule translation
+type TranslationResult struct {
+	// NumToServicesRules is the number of ToServices rules processed while
+	// translating the rules
+	NumToServicesRules int
+
+	// PrefixesToAdd contains all egress CIDRs that are to be added
+	// for the translation.
+	PrefixesToAdd []netip.Prefix
+
+	// PrefixesToRelease contains all egress CIDRs that are to be removed
+	// for the translation.
+	PrefixesToRelease []netip.Prefix
+}
+
+// TranslateRules traverses rules and applies the provided translator to each rule
+//
+// Note: Only used by the k8s watcher.
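
GetRevision above and BumpRevision just below pair an atomic counter with lock-free reads: the revision can be read at any time without taking the repository mutex, while increments stay race-free. A minimal sketch of that pattern with Go's sync/atomic, independent of the Cilium types:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// repo is a stand-in for the policy repository's revision handling.
type repo struct {
	revision atomic.Uint64
}

// GetRevision returns the current revision without any locking.
func (r *repo) GetRevision() uint64 { return r.revision.Load() }

// BumpRevision increments the revision; safe to call concurrently.
func (r *repo) BumpRevision() { r.revision.Add(1) }

func main() {
	var r repo
	r.BumpRevision()
	fmt.Println(r.GetRevision()) // 1
}
```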
+func (p *Repository) TranslateRules(translator Translator) (*TranslationResult, error) {
+	p.Mutex.Lock()
+	defer p.Mutex.Unlock()
+
+	result := &TranslationResult{}
+
+	for ruleIndex := range p.rules {
+		if err := translator.Translate(&p.rules[ruleIndex].Rule, result); err != nil {
+			return nil, err
+		}
+	}
+	return result, nil
+}
+
+// BumpRevision allows forcing policy regeneration
+func (p *Repository) BumpRevision() {
+	metrics.PolicyRevision.Inc()
+	p.revision.Add(1)
+}
+
+// GetRulesList returns the current policy
+func (p *Repository) GetRulesList() *models.Policy {
+	p.Mutex.RLock()
+	defer p.Mutex.RUnlock()
+
+	lbls := labels.ParseSelectLabelArrayFromArray([]string{})
+	ruleList := p.SearchRLocked(lbls)
+
+	return &models.Policy{
+		Revision: int64(p.GetRevision()),
+		Policy:   JSONMarshalRules(ruleList),
+	}
+}
+
+// resolvePolicyLocked returns the selectorPolicy for the provided
+// identity from the set of rules in the repository. If the policy
+// cannot be generated due to conflicts at L4 or L7, returns an error.
+//
+// Must be performed while holding the Repository lock.
+func (p *Repository) resolvePolicyLocked(securityIdentity *identity.Identity) (*selectorPolicy, error) {
+	// First obtain whether policy applies in both traffic directions, as well
+	// as the list of rules which actually select this endpoint. This allows us
+	// to avoid iterating through the entire rule list multiple times and
+	// performing the matching decision again when computing policy for each
+	// protocol layer, which is quite costly in terms of performance.
+	ingressEnabled, egressEnabled,
+		matchingRules :=
+		p.computePolicyEnforcementAndRules(securityIdentity)
+
+	calculatedPolicy := &selectorPolicy{
+		Revision:             p.GetRevision(),
+		SelectorCache:        p.GetSelectorCache(),
+		L4Policy:             NewL4Policy(p.GetRevision()),
+		IngressPolicyEnabled: ingressEnabled,
+		EgressPolicyEnabled:  egressEnabled,
+	}
+
+	lbls := securityIdentity.LabelArray
+	ingressCtx := SearchContext{
+		To:          lbls,
+		rulesSelect: true,
+	}
+
+	egressCtx := SearchContext{
+		From:        lbls,
+		rulesSelect: true,
+	}
+
+	if option.Config.TracingEnabled() {
+		ingressCtx.Trace = TRACE_ENABLED
+		egressCtx.Trace = TRACE_ENABLED
+	}
+
+	policyCtx := policyContext{
+		repo: p,
+		ns:   lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
+	}
+
+	if ingressEnabled {
+		newL4IngressPolicy, err := matchingRules.resolveL4IngressPolicy(&policyCtx, &ingressCtx)
+		if err != nil {
+			return nil, err
+		}
+		calculatedPolicy.L4Policy.Ingress.PortRules = newL4IngressPolicy
+	}
+
+	if egressEnabled {
+		newL4EgressPolicy, err := matchingRules.resolveL4EgressPolicy(&policyCtx, &egressCtx)
+		if err != nil {
+			return nil, err
+		}
+		calculatedPolicy.L4Policy.Egress.PortRules = newL4EgressPolicy
+	}
+
+	// Make the calculated policy ready for incremental updates
+	calculatedPolicy.Attach(&policyCtx)
+
+	return calculatedPolicy, nil
+}
+
+// computePolicyEnforcementAndRules returns whether policy applies at ingress
+// or egress for the given security identity, as well as a list of any rules
+// which select the set of labels of the given security identity.
+//
+// Must be called with repo mutex held for reading.
+func (p *Repository) computePolicyEnforcementAndRules(securityIdentity *identity.Identity) (
+	ingress, egress bool,
+	matchingRules ruleSlice,
+) {
+	lbls := securityIdentity.LabelArray
+
+	// Check if policy enforcement should be enabled at the daemon level.
+ if lbls.Has(labels.IDNameHost) && !option.Config.EnableHostFirewall { + return false, false, nil + } + switch GetPolicyEnabled() { + case option.AlwaysEnforce: + _, _, matchingRules = p.getMatchingRules(securityIdentity) + // If policy enforcement is enabled for the daemon, then it has to be + // enabled for the endpoint. + return true, true, matchingRules + case option.DefaultEnforcement: + ingress, egress, matchingRules = p.getMatchingRules(securityIdentity) + // If the endpoint has the reserved:init label, i.e. if it has not yet + // received any labels, always enforce policy (default deny). + if lbls.Has(labels.IDNameInit) { + return true, true, matchingRules + } + + // Default mode means that if rules contain labels that match this + // endpoint, then enable policy enforcement for this endpoint. + return ingress, egress, matchingRules + default: + // If policy enforcement isn't enabled, we do not enable policy + // enforcement for the endpoint. We don't care about returning any + // rules that match. + return false, false, nil + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/resolve.go b/vendor/github.com/cilium/cilium/pkg/policy/resolve.go new file mode 100644 index 0000000000..7fdfac8156 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/policy/resolve.go @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package policy + +import ( + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/identity" + "github.com/cilium/cilium/pkg/policy/trafficdirection" +) + +// selectorPolicy is a structure which contains the resolved policy for a +// particular Identity across all layers (L3, L4, and L7), with the policy +// still determined in terms of EndpointSelectors. +type selectorPolicy struct { + // Revision is the revision of the policy repository used to generate + // this selectorPolicy. + Revision uint64 + + // SelectorCache managing selectors in L4Policy + SelectorCache *SelectorCache + + // L4Policy contains the computed L4 and L7 policy. + L4Policy L4Policy + + // IngressPolicyEnabled specifies whether this policy contains any policy + // at ingress. + IngressPolicyEnabled bool + + // EgressPolicyEnabled specifies whether this policy contains any policy + // at egress. + EgressPolicyEnabled bool +} + +func (p *selectorPolicy) Attach(ctx PolicyContext) { + p.L4Policy.Attach(ctx) +} + +// EndpointPolicy is a structure which contains the resolved policy across all +// layers (L3, L4, and L7), distilled against a set of identities. +type EndpointPolicy struct { + // Note that all Endpoints sharing the same identity will be + // referring to a shared selectorPolicy! + *selectorPolicy + + // policyMapState contains the state of this policy as it relates to the + // datapath. In the future, this will be factored out of this object to + // decouple the policy as it relates to the datapath vs. its userspace + // representation. + // It maps each Key to the proxy port if proxy redirection is needed. + // Proxy port 0 indicates no proxy redirection. + // All fields within the Key and the proxy port must be in host byte-order. + // Must only be accessed with PolicyOwner (aka Endpoint) lock taken. + policyMapState MapState + + // policyMapChanges collects pending changes to the PolicyMapState + policyMapChanges MapChanges + + // PolicyOwner describes any type which consumes this EndpointPolicy object. 
+	PolicyOwner PolicyOwner
+}
+
+// PolicyOwner is anything which consumes an EndpointPolicy.
+type PolicyOwner interface {
+	GetID() uint64
+	LookupRedirectPortBuildLocked(ingress bool, protocol string, port uint16) uint16
+	HasBPFPolicyMap() bool
+	GetNamedPort(ingress bool, name string, proto uint8) uint16
+	PolicyDebug(fields logrus.Fields, msg string)
+}
+
+// newSelectorPolicy returns an empty selectorPolicy stub.
+func newSelectorPolicy(selectorCache *SelectorCache) *selectorPolicy {
+	return &selectorPolicy{
+		Revision:      0,
+		SelectorCache: selectorCache,
+		L4Policy:      NewL4Policy(0),
+	}
+}
+
+// insertUser adds a user to the L4Policy so that incremental
+// updates of the L4Policy may be forwarded.
+func (p *selectorPolicy) insertUser(user *EndpointPolicy) {
+	p.L4Policy.insertUser(user)
+}
+
+// removeUser removes a user from the L4Policy so the EndpointPolicy
+// can be freed when it is no longer needed.
+func (p *selectorPolicy) removeUser(user *EndpointPolicy) {
+	p.L4Policy.removeUser(user)
+}
+
+// Detach releases resources held by a selectorPolicy to enable
+// successful eventual GC. Note that the selectorPolicy itself is not
+// modified in any way, so that it can be used concurrently.
+func (p *selectorPolicy) Detach() {
+	p.L4Policy.Detach(p.SelectorCache)
+}
+
+// DistillPolicy filters down the specified selectorPolicy (which acts
+// upon selectors) into a set of concrete map entries based on the
+// SelectorCache. These can subsequently be plumbed into the datapath.
+//
+// Called without holding the Selector cache or Repository locks.
+// PolicyOwner (aka Endpoint) is also unlocked during this call,
+// but the Endpoint's build mutex is held.
+func (p *selectorPolicy) DistillPolicy(policyOwner PolicyOwner, isHost bool) *EndpointPolicy {
+	calculatedPolicy := &EndpointPolicy{
+		selectorPolicy: p,
+		policyMapState: NewMapState(nil),
+		PolicyOwner:    policyOwner,
+	}
+
+	if !p.IngressPolicyEnabled || !p.EgressPolicyEnabled {
+		calculatedPolicy.policyMapState.allowAllIdentities(
+			!p.IngressPolicyEnabled, !p.EgressPolicyEnabled)
+	}
+
+	// Register the new EndpointPolicy as a receiver of delta
+	// updates. Any updates happening after this, but before the
+	// computeDesiredL4PolicyMapEntries() call finishes, may
+	// already be applied to the PolicyMapState, specifically:
+	//
+	// - policyMapChanges may contain an addition of an entry that
+	//   is already added to the PolicyMapState
+	//
+	// - policyMapChanges may contain a deletion of an entry that
+	//   has already been deleted from PolicyMapState
+	p.insertUser(calculatedPolicy)
+
+	// Must come after the 'insertUser()' above to guarantee
+	// PolicyMapChanges will contain all changes that are applied
+	// after the computation of PolicyMapState has started.
+	p.SelectorCache.mutex.RLock()
+	calculatedPolicy.toMapState()
+	if !isHost {
+		calculatedPolicy.policyMapState.determineAllowLocalhostIngress()
+	}
+	p.SelectorCache.mutex.RUnlock()
+
+	return calculatedPolicy
+}
+
+// GetPolicyMap gets the policy map state as the interface
+// MapState
+func (p *EndpointPolicy) GetPolicyMap() MapState {
+	return p.policyMapState
+}
+
+// SetPolicyMap sets the policy map state as the interface
+// MapState. If the main argument is nil, then this method
+// will initialize a new MapState object for the caller.
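
Because PolicyOwner is a small interface, unit tests can satisfy it with a no-op double. A hedged sketch follows; the interface is re-declared locally with just the methods shown above, so this is an illustration rather than Cilium's package:

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

// PolicyOwner re-declares the vendored interface locally for this sketch.
type PolicyOwner interface {
	GetID() uint64
	LookupRedirectPortBuildLocked(ingress bool, protocol string, port uint16) uint16
	HasBPFPolicyMap() bool
	GetNamedPort(ingress bool, name string, proto uint8) uint16
	PolicyDebug(fields logrus.Fields, msg string)
}

// fakeOwner is a no-op double that satisfies PolicyOwner for tests.
type fakeOwner struct{ id uint64 }

func (f fakeOwner) GetID() uint64                                             { return f.id }
func (f fakeOwner) LookupRedirectPortBuildLocked(bool, string, uint16) uint16 { return 0 }
func (f fakeOwner) HasBPFPolicyMap() bool                                     { return false }
func (f fakeOwner) GetNamedPort(bool, string, uint8) uint16                   { return 0 }
func (f fakeOwner) PolicyDebug(logrus.Fields, string)                         {}

var _ PolicyOwner = fakeOwner{} // compile-time interface check

func main() {
	fmt.Println(fakeOwner{id: 42}.GetID())
}
```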
+func (p *EndpointPolicy) SetPolicyMap(ms MapState) {
+	if ms == nil {
+		p.policyMapState = NewMapState(nil)
+		return
+	}
+	p.policyMapState = ms
+}
+
+// Detach removes EndpointPolicy references from selectorPolicy
+// to allow the EndpointPolicy to be GC'd.
+// PolicyOwner (aka Endpoint) is also locked during this call.
+func (p *EndpointPolicy) Detach() {
+	p.selectorPolicy.removeUser(p)
+}
+
+// toMapState transforms the EndpointPolicy.L4Policy into
+// the datapath-friendly format inside EndpointPolicy.PolicyMapState.
+// Called with the SelectorCache locked for reading.
+// Called without holding the Repository lock.
+// PolicyOwner (aka Endpoint) is also unlocked during this call,
+// but the Endpoint's build mutex is held.
+func (p *EndpointPolicy) toMapState() {
+	p.L4Policy.Ingress.toMapState(p)
+	p.L4Policy.Egress.toMapState(p)
+}
+
+// toMapState transforms the L4DirectionPolicy into
+// the datapath-friendly format inside EndpointPolicy.PolicyMapState.
+// Called with the SelectorCache locked for reading.
+// Called without holding the Repository lock.
+// PolicyOwner (aka Endpoint) is also unlocked during this call,
+// but the Endpoint's build mutex is held.
+func (l4policy L4DirectionPolicy) toMapState(p *EndpointPolicy) {
+	for _, l4 := range l4policy.PortRules {
+		lookupDone := false
+		proxyport := uint16(0)
+		l4.toMapState(p, l4policy.features, func(keyFromFilter Key, entry *MapStateEntry) bool {
+			// Fix up the proxy port for entries that need proxy redirection
+			if entry.IsRedirectEntry() {
+				if !lookupDone {
+					// only lookup once for each filter
+					// Use 'destPort' from the key as it is already resolved
+					// from a named port if needed.
+					proxyport = p.PolicyOwner.LookupRedirectPortBuildLocked(l4.Ingress, string(l4.Protocol), keyFromFilter.DestPort)
+					lookupDone = true
+				}
+				entry.ProxyPort = proxyport
+				// If the currently allocated proxy port is 0, this is a new
+				// redirect, for which no port has been allocated yet. Ignore
+				// it for now. This will be configured by
+				// UpdateRedirects() once the port has been allocated.
+				if !entry.IsRedirectEntry() {
+					return false
+				}
+			}
+			return true
+		}, ChangeState{})
+	}
+}
+
+type getProxyPortFunc func(*L4Filter) (proxyPort uint16, ok bool)
+
+// UpdateRedirects updates redirects in the EndpointPolicy's PolicyMapState by using the provided
+// function to obtain a proxy port number to use. Changes to 'p.PolicyMapState' are collected in
+// 'adds' and 'updated' so that they can be reverted when needed.
+func (p *EndpointPolicy) UpdateRedirects(ingress bool, getProxyPort getProxyPortFunc, changes ChangeState) {
+	l4policy := &p.L4Policy.Ingress
+	if !ingress {
+		l4policy = &p.L4Policy.Egress
+	}
+
+	l4policy.updateRedirects(p, getProxyPort, changes)
+}
+
+func (l4policy L4DirectionPolicy) updateRedirects(p *EndpointPolicy, getProxyPort getProxyPortFunc, changes ChangeState) {
+	// The SelectorCache needs to be locked for the toMapState (GetLabels()) call
+	p.SelectorCache.mutex.RLock()
+	defer p.SelectorCache.mutex.RUnlock()
+
+	for _, l4 := range l4policy.PortRules {
+		if l4.IsRedirect() {
+			// Check if we are denying this specific L4 first, regardless of
+			// the L3, if there are any deny policies
+			if l4policy.features.contains(denyRules) && p.policyMapState.deniesL4(p.PolicyOwner, l4) {
+				continue
+			}
+
+			redirectPort, ok := getProxyPort(l4)
+			if !ok {
+				continue
+			}
+
+			// Set the proxy port in the policy map.
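+			// The callback below reuses the same toMapState mechanism that
+			// computed the map initially, this time overwriting ProxyPort on
+			// the redirect entries with the allocated port.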
+ l4.toMapState(p, l4policy.features, func(_ Key, entry *MapStateEntry) bool { + if entry.IsRedirectEntry() { + entry.ProxyPort = redirectPort + } + return true + }, changes) + } + } +} + +// ConsumeMapChanges transfers the changes from MapChanges to the caller, +// locking the selector cache to make sure concurrent identity updates +// have completed. +// PolicyOwner (aka Endpoint) is also locked during this call. +func (p *EndpointPolicy) ConsumeMapChanges() (adds, deletes Keys) { + p.selectorPolicy.SelectorCache.mutex.Lock() + defer p.selectorPolicy.SelectorCache.mutex.Unlock() + features := p.selectorPolicy.L4Policy.Ingress.features | p.selectorPolicy.L4Policy.Egress.features + return p.policyMapChanges.consumeMapChanges(p.policyMapState, features, p.SelectorCache) +} + +// AllowsIdentity returns whether the specified policy allows +// ingress and egress traffic for the specified numeric security identity. +// If the 'secID' is zero, it will check if all traffic is allowed. +// +// Returning true for either return value indicates all traffic is allowed. +func (p *EndpointPolicy) AllowsIdentity(identity identity.NumericIdentity) (ingress, egress bool) { + key := Key{ + Identity: uint32(identity), + } + + if !p.IngressPolicyEnabled { + ingress = true + } else { + key.TrafficDirection = trafficdirection.Ingress.Uint8() + if v, exists := p.policyMapState.Get(key); exists && !v.IsDeny { + ingress = true + } + } + + if !p.EgressPolicyEnabled { + egress = true + } else { + key.TrafficDirection = trafficdirection.Egress.Uint8() + if v, exists := p.policyMapState.Get(key); exists && !v.IsDeny { + egress = true + } + } + + return ingress, egress +} + +// NewEndpointPolicy returns an empty EndpointPolicy stub. +func NewEndpointPolicy(repo *Repository) *EndpointPolicy { + return &EndpointPolicy{ + selectorPolicy: newSelectorPolicy(repo.GetSelectorCache()), + policyMapState: NewMapState(nil), + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/rule.go b/vendor/github.com/cilium/cilium/pkg/policy/rule.go new file mode 100644 index 0000000000..d5f3101f16 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/policy/rule.go @@ -0,0 +1,790 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package policy + +import ( + "fmt" + "strconv" + "strings" + + "github.com/cilium/proxy/pkg/policy/api/kafka" + + "github.com/cilium/cilium/pkg/identity" + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + "github.com/cilium/cilium/pkg/labels" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/policy/api" +) + +type rule struct { + api.Rule + + metadata *ruleMetadata +} + +type ruleMetadata struct { + // mutex protects all fields in this type. + Mutex lock.Mutex + + // IdentitySelected is a cache that maps from an identity to whether + // this rule selects that identity. 
+ IdentitySelected map[identity.NumericIdentity]bool +} + +func newRuleMetadata() *ruleMetadata { + return &ruleMetadata{ + IdentitySelected: make(map[identity.NumericIdentity]bool), + } +} + +func (m *ruleMetadata) delete(identity *identity.Identity) { + m.Mutex.Lock() + defer m.Mutex.Unlock() + delete(m.IdentitySelected, identity.ID) +} + +func (r *rule) String() string { + return r.EndpointSelector.String() +} + +func (r *rule) getSelector() *api.EndpointSelector { + if r.NodeSelector.LabelSelector != nil { + return &r.NodeSelector + } + return &r.EndpointSelector +} + +func (epd *PerSelectorPolicy) appendL7WildcardRule(ctx *SearchContext) api.L7Rules { + // Wildcard rule only needs to be appended if some rules already exist + switch { + case len(epd.L7Rules.HTTP) > 0: + rule := api.PortRuleHTTP{} + if !rule.Exists(epd.L7Rules) { + ctx.PolicyTrace(" Merging HTTP wildcard rule: %+v\n", rule) + epd.L7Rules.HTTP = append(epd.L7Rules.HTTP, rule) + } else { + ctx.PolicyTrace(" Merging HTTP wildcard rule, equal rule already exists: %+v\n", rule) + } + case len(epd.L7Rules.Kafka) > 0: + rule := kafka.PortRule{} + rule.Sanitize() + if !rule.Exists(epd.L7Rules.Kafka) { + ctx.PolicyTrace(" Merging Kafka wildcard rule: %+v\n", rule) + epd.L7Rules.Kafka = append(epd.L7Rules.Kafka, rule) + } else { + ctx.PolicyTrace(" Merging Kafka wildcard rule, equal rule already exists: %+v\n", rule) + } + case len(epd.L7Rules.DNS) > 0: + // Wildcarding at L7 for DNS is specified via allowing all via + // MatchPattern! + rule := api.PortRuleDNS{MatchPattern: "*"} + rule.Sanitize() + if !rule.Exists(epd.L7Rules) { + ctx.PolicyTrace(" Merging DNS wildcard rule: %+v\n", rule) + epd.L7Rules.DNS = append(epd.L7Rules.DNS, rule) + } else { + ctx.PolicyTrace(" Merging DNS wildcard rule, equal rule already exists: %+v\n", rule) + } + case epd.L7Rules.L7Proto != "" && len(epd.L7Rules.L7) > 0: + rule := api.PortRuleL7{} + if !rule.Exists(epd.L7Rules) { + ctx.PolicyTrace(" Merging L7 wildcard rule: %+v\n", rule) + epd.L7Rules.L7 = append(epd.L7Rules.L7, rule) + } else { + ctx.PolicyTrace(" Merging L7 wildcard rule, equal rule already exists: %+v\n", rule) + } + } + return epd.L7Rules +} + +func mergePortProto(ctx *SearchContext, existingFilter, filterToMerge *L4Filter, selectorCache *SelectorCache) (err error) { + // Merge the L7-related data from the filter to merge + // with the L7-related data already in the existing filter. + existingFilter.L7Parser, err = existingFilter.L7Parser.Merge(filterToMerge.L7Parser) + if err != nil { + ctx.PolicyTrace(" Merge conflict: mismatching parsers %s/%s\n", filterToMerge.L7Parser, existingFilter.L7Parser) + return err + } + + if existingFilter.Listener == "" || filterToMerge.Listener == "" { + if filterToMerge.Listener != "" { + existingFilter.Listener = filterToMerge.Listener + } + } else if filterToMerge.Listener != existingFilter.Listener { + ctx.PolicyTrace(" Merge conflict: mismatching CiliumEnvoyConfig listeners %v/%v\n", filterToMerge.Listener, existingFilter.Listener) + return fmt.Errorf("cannot merge conflicting CiliumEnvoyConfig Listeners (%v/%v)", filterToMerge.Listener, existingFilter.Listener) + } + + for cs, newL7Rules := range filterToMerge.PerSelectorPolicies { + // 'cs' will be merged or moved (see below), either way it needs + // to be removed from the map it is in now. 
+		delete(filterToMerge.PerSelectorPolicies, cs)
+
+		if l7Rules, ok := existingFilter.PerSelectorPolicies[cs]; ok {
+			// existing filter already has 'cs', release and merge L7 rules
+			selectorCache.RemoveSelector(cs, filterToMerge)
+
+			// skip merging for reserved:none, as it is never
+			// selected, and toFQDN rules currently translate to
+			// reserved:none as an endpoint selector, causing a
+			// merge conflict for different toFQDN destinations
+			// with different TLS contexts.
+			if cs.IsNone() {
+				continue
+			}
+
+			if l7Rules.Equal(newL7Rules) {
+				continue // identical rules need no merging
+			}
+
+			// Merge two non-identical sets of non-nil rules
+			if l7Rules != nil && l7Rules.IsDeny {
+				// If the existing rule is a deny then it's a no-op.
+				// Deny rules take priority over any other rule.
+				continue
+			} else if newL7Rules != nil && newL7Rules.IsDeny {
+				// Overwrite the existing filter if the new rule is a deny case.
+				// Deny rules take priority over any other rule.
+				existingFilter.PerSelectorPolicies[cs] = newL7Rules
+				continue
+			}
+
+			// One of the rules may be a nil rule, expand it to an empty non-nil rule
+			if l7Rules == nil {
+				l7Rules = &PerSelectorPolicy{}
+			}
+			if newL7Rules == nil {
+				newL7Rules = &PerSelectorPolicy{}
+			}
+
+			// Merge the isRedirect flag
+			l7Rules.isRedirect = l7Rules.isRedirect || newL7Rules.isRedirect
+
+			if l7Rules.Authentication == nil || newL7Rules.Authentication == nil {
+				if newL7Rules.Authentication != nil {
+					l7Rules.Authentication = newL7Rules.Authentication
+				}
+			} else if !newL7Rules.Authentication.DeepEqual(l7Rules.Authentication) {
+				ctx.PolicyTrace("   Merge conflict: mismatching auth types %s/%s\n", newL7Rules.Authentication.Mode, l7Rules.Authentication.Mode)
+				return fmt.Errorf("cannot merge conflicting authentication types (%s/%s)", newL7Rules.Authentication.Mode, l7Rules.Authentication.Mode)
+			}
+
+			if l7Rules.TerminatingTLS == nil || newL7Rules.TerminatingTLS == nil {
+				if newL7Rules.TerminatingTLS != nil {
+					l7Rules.TerminatingTLS = newL7Rules.TerminatingTLS
+				}
+			} else if !newL7Rules.TerminatingTLS.Equal(l7Rules.TerminatingTLS) {
+				ctx.PolicyTrace("   Merge conflict: mismatching terminating TLS contexts %v/%v\n", newL7Rules.TerminatingTLS, l7Rules.TerminatingTLS)
+				return fmt.Errorf("cannot merge conflicting terminating TLS contexts for cached selector %s: (%v/%v)", cs.String(), newL7Rules.TerminatingTLS, l7Rules.TerminatingTLS)
+			}
+			if l7Rules.OriginatingTLS == nil || newL7Rules.OriginatingTLS == nil {
+				if newL7Rules.OriginatingTLS != nil {
+					l7Rules.OriginatingTLS = newL7Rules.OriginatingTLS
+				}
+			} else if !newL7Rules.OriginatingTLS.Equal(l7Rules.OriginatingTLS) {
+				ctx.PolicyTrace("   Merge conflict: mismatching originating TLS contexts %v/%v\n", newL7Rules.OriginatingTLS, l7Rules.OriginatingTLS)
+				return fmt.Errorf("cannot merge conflicting originating TLS contexts for cached selector %s: (%v/%v)", cs.String(), newL7Rules.OriginatingTLS, l7Rules.OriginatingTLS)
+			}
+
+			// For now we simply merge the set of allowed SNIs from different rules
+			// to/from the *same remote*, port, and protocol. This means that if any
+			// rule requires SNI, then all traffic to that remote/port requires TLS,
+			// even if other merged rules would be fine without TLS. Any SNI from all
+			// applicable rules is allowed.
+			//
+			// Preferably we could allow different rules for each SNI, but for now the
+			// combination of all L7 rules is allowed for all the SNIs. For example, if
+			// SNI and TLS termination are used together so that L7 filtering is
+			// possible, in this example:
+			//
+			// - existing: SNI: public.example.com
+			// - new:      SNI: private.example.com HTTP: path="/public"
+			//
+			// Separately, these rules allow access to all paths at SNI
+			// public.example.com and the path private.example.com/public, but
+			// currently we allow all paths also at private.example.com. This may be
+			// clamped down if there is sufficient demand for SNI and TLS termination
+			// together.
+			//
+			// Note however that SNI rules are typically used with `toFQDNs`, each of
+			// which defines a separate destination, so that SNIs for different
+			// `toFQDNs` will not be merged together.
+			l7Rules.ServerNames = l7Rules.ServerNames.Merge(newL7Rules.ServerNames)
+
+			// L7 rules can be applied with SNI filtering only if the TLS is also
+			// terminated
+			if len(l7Rules.ServerNames) > 0 && !l7Rules.L7Rules.IsEmpty() && l7Rules.TerminatingTLS == nil {
+				ctx.PolicyTrace("   Merge conflict: cannot use SNI filtering with L7 rules without TLS termination: %v\n", l7Rules.ServerNames)
+				return fmt.Errorf("cannot merge L7 rules for cached selector %s with SNI filtering without TLS termination: %v", cs.String(), l7Rules.ServerNames)
+			}
+
+			// empty L7 rules effectively wildcard L7. When merging with a non-empty
+			// rule, the empty one must be expanded to an actual wildcard rule for the
+			// specific L7
+			if !l7Rules.HasL7Rules() && newL7Rules.HasL7Rules() {
+				l7Rules.L7Rules = newL7Rules.appendL7WildcardRule(ctx)
+				existingFilter.PerSelectorPolicies[cs] = l7Rules
+				continue
+			}
+			if l7Rules.HasL7Rules() && !newL7Rules.HasL7Rules() {
+				l7Rules.appendL7WildcardRule(ctx)
+				existingFilter.PerSelectorPolicies[cs] = l7Rules
+				continue
+			}
+
+			// We already know from the L7Parser.Merge() above that there are no
+			// conflicting parser types, and rule validation only allows one type of L7
+			// rules in a rule, so we can just merge the rules here.
+			for _, newRule := range newL7Rules.HTTP {
+				if !newRule.Exists(l7Rules.L7Rules) {
+					l7Rules.HTTP = append(l7Rules.HTTP, newRule)
+				}
+			}
+			for _, newRule := range newL7Rules.Kafka {
+				if !newRule.Exists(l7Rules.L7Rules.Kafka) {
+					l7Rules.Kafka = append(l7Rules.Kafka, newRule)
+				}
+			}
+			if l7Rules.L7Proto == "" && newL7Rules.L7Proto != "" {
+				l7Rules.L7Proto = newL7Rules.L7Proto
+			}
+			for _, newRule := range newL7Rules.L7 {
+				if !newRule.Exists(l7Rules.L7Rules) {
+					l7Rules.L7 = append(l7Rules.L7, newRule)
+				}
+			}
+			for _, newRule := range newL7Rules.DNS {
+				if !newRule.Exists(l7Rules.L7Rules) {
+					l7Rules.DNS = append(l7Rules.DNS, newRule)
+				}
+			}
+			// Update the pointer in the map in case it was newly allocated
+			existingFilter.PerSelectorPolicies[cs] = l7Rules
+		} else { // 'cs' is not in the existing filter yet
+			// Update the selector owner to the existing filter
+			selectorCache.ChangeUser(cs, filterToMerge, existingFilter)
+
+			// Move the L7 rules over.
+			existingFilter.PerSelectorPolicies[cs] = newL7Rules
+
+			if cs.IsWildcard() {
+				existingFilter.wildcard = cs
+			}
+		}
+	}
+
+	return nil
+}
+
+// mergeIngressPortProto merges all rules which share the same port & protocol that
+// select a given set of endpoints. It updates the L4Filter mapped to by the specified
+// port and protocol with the contents of the provided PortRule. If the rule
+// being merged has conflicting L7 rules with those already in the provided
+// L4PolicyMap for the specified port-protocol tuple, it returns an error.
+// +// If any rules contain L7 rules that select Host or Remote Node and we should +// accept all traffic from host, the L7 rules will be translated into L7 +// wildcards via 'hostWildcardL7'. That is to say, traffic will be +// forwarded to the proxy for endpoints matching those labels, but the proxy +// will allow all such traffic. +func mergeIngressPortProto(policyCtx PolicyContext, ctx *SearchContext, endpoints api.EndpointSelectorSlice, auth *api.Authentication, hostWildcardL7 []string, + r api.Ports, p api.PortProtocol, proto api.L4Proto, ruleLabels labels.LabelArray, resMap L4PolicyMap) (int, error) { + // Create a new L4Filter + filterToMerge, err := createL4IngressFilter(policyCtx, endpoints, auth, hostWildcardL7, r, p, proto, ruleLabels) + if err != nil { + return 0, err + } + + err = addL4Filter(policyCtx, ctx, resMap, p, proto, filterToMerge, ruleLabels) + if err != nil { + return 0, err + } + return 1, err +} + +func traceL3(ctx *SearchContext, peerEndpoints api.EndpointSelectorSlice, direction string, isDeny bool) { + var result strings.Builder + + // Requirements will be cloned into every selector, only trace them once. + if len(peerEndpoints[0].MatchExpressions) > 0 { + sel := peerEndpoints[0] + result.WriteString(" Enforcing requirements ") + result.WriteString(fmt.Sprintf("%+v", sel.MatchExpressions)) + result.WriteString("\n") + } + // EndpointSelector + for _, sel := range peerEndpoints { + if len(sel.MatchLabels) > 0 { + if !isDeny { + result.WriteString(" Allows ") + } else { + result.WriteString(" Denies ") + } + result.WriteString(direction) + result.WriteString(" labels ") + result.WriteString(sel.String()) + result.WriteString("\n") + } + } + ctx.PolicyTrace(result.String()) +} + +// portRulesCoverContext determines whether L4 portions of rules cover the +// specified port models. +// +// Returns true if the list of ports is 0, or the rules match the ports. +func rulePortsCoverSearchContext(ports []api.PortProtocol, ctx *SearchContext) bool { + if len(ctx.DPorts) == 0 { + return true + } + for _, p := range ports { + for _, dp := range ctx.DPorts { + tracePort := api.PortProtocol{ + Protocol: api.L4Proto(dp.Protocol), + } + if dp.Name != "" { + tracePort.Port = dp.Name + } else { + tracePort.Port = strconv.FormatUint(uint64(dp.Port), 10) + } + if p.Covers(tracePort) { + return true + } + } + } + return false +} + +func mergeIngress(policyCtx PolicyContext, ctx *SearchContext, fromEndpoints api.EndpointSelectorSlice, auth *api.Authentication, toPorts, icmp api.PortsIterator, ruleLabels labels.LabelArray, resMap L4PolicyMap) (int, error) { + found := 0 + + if ctx.From != nil && len(fromEndpoints) > 0 { + if ctx.TraceEnabled() { + traceL3(ctx, fromEndpoints, "from", policyCtx.IsDeny()) + } + if !fromEndpoints.Matches(ctx.From) { + ctx.PolicyTrace(" No label match for %s", ctx.From) + return 0, nil + } + ctx.PolicyTrace(" Found all required labels") + } + + // Daemon options may induce L3 allows for host/world. 
In this case, if + // we find any L7 rules matching host/world then we need to turn any L7 + // restrictions on these endpoints into L7 allow-all so that the + // traffic is always allowed, but is also always redirected through the + // proxy + hostWildcardL7 := make([]string, 0, 2) + if option.Config.AlwaysAllowLocalhost() { + hostWildcardL7 = append(hostWildcardL7, labels.IDNameHost) + if !option.Config.EnableRemoteNodeIdentity { + hostWildcardL7 = append(hostWildcardL7, labels.IDNameRemoteNode) + } + } + + var ( + cnt int + err error + ) + + // L3-only rule (with requirements folded into fromEndpoints). + if toPorts.Len() == 0 && icmp.Len() == 0 && len(fromEndpoints) > 0 { + cnt, err = mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, &api.PortRule{}, api.PortProtocol{Port: "0", Protocol: api.ProtoAny}, api.ProtoAny, ruleLabels, resMap) + if err != nil { + return found, err + } + } + + found += cnt + + err = toPorts.Iterate(func(r api.Ports) error { + // For L4 Policy, an empty slice of EndpointSelector indicates that the + // rule allows all at L3 - explicitly specify this by creating a slice + // with the WildcardEndpointSelector. + if len(fromEndpoints) == 0 { + fromEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector} + } + if !policyCtx.IsDeny() { + ctx.PolicyTrace(" Allows port %v\n", r.GetPortProtocols()) + } else { + ctx.PolicyTrace(" Denies port %v\n", r.GetPortProtocols()) + } + if !rulePortsCoverSearchContext(r.GetPortProtocols(), ctx) { + ctx.PolicyTrace(" No port match found\n") + return nil + } + pr := r.GetPortRule() + if pr != nil { + if pr.Rules != nil && pr.Rules.L7Proto != "" { + ctx.PolicyTrace(" l7proto: \"%s\"\n", pr.Rules.L7Proto) + } + if !pr.Rules.IsEmpty() { + for _, l7 := range pr.Rules.HTTP { + ctx.PolicyTrace(" %+v\n", l7) + } + for _, l7 := range pr.Rules.Kafka { + ctx.PolicyTrace(" %+v\n", l7) + } + for _, l7 := range pr.Rules.L7 { + ctx.PolicyTrace(" %+v\n", l7) + } + } + } + + for _, p := range r.GetPortProtocols() { + if p.Protocol.IsAny() { + cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoTCP, ruleLabels, resMap) + if err != nil { + return err + } + found += cnt + + cnt, err = mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoUDP, ruleLabels, resMap) + if err != nil { + return err + } + found += cnt + + cnt, err = mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoSCTP, ruleLabels, resMap) + if err != nil { + return err + } + found += cnt + } else { + cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, p.Protocol, ruleLabels, resMap) + if err != nil { + return err + } + found += cnt + } + } + return nil + }) + if err != nil { + return found, err + } + + err = icmp.Iterate(func(r api.Ports) error { + if len(fromEndpoints) == 0 { + fromEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector} + } + if !policyCtx.IsDeny() { + ctx.PolicyTrace(" Allows ICMP type %v\n", r.GetPortProtocols()) + } else { + ctx.PolicyTrace(" Denies ICMP type %v\n", r.GetPortProtocols()) + } + if !rulePortsCoverSearchContext(r.GetPortProtocols(), ctx) { + ctx.PolicyTrace(" No ICMP type match found\n") + return nil + } + + for _, p := range r.GetPortProtocols() { + cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, p.Protocol, ruleLabels, resMap) + if err != nil { + return err + } + found += cnt + } + return nil + }) + 
+ return found, err +} + +func (state *traceState) selectRule(ctx *SearchContext, r *rule) { + ctx.PolicyTrace("* Rule %s: selected\n", r) + state.selectedRules++ +} + +func (state *traceState) unSelectRule(ctx *SearchContext, labels labels.LabelArray, r *rule) { + ctx.PolicyTraceVerbose(" Rule %s: did not select %+v\n", r, labels) +} + +// resolveIngressPolicy analyzes the rule against the given SearchContext, and +// merges it with any prior-generated policy within the provided L4Policy. +// Requirements based off of all Ingress requirements (set in FromRequires) in +// other rules are stored in the specified slice of LabelSelectorRequirement. +// These requirements are dynamically inserted into a copy of the receiver rule, +// as requirements form conjunctions across all rules. +func (r *rule) resolveIngressPolicy( + policyCtx PolicyContext, + ctx *SearchContext, + state *traceState, + result L4PolicyMap, + requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement, +) ( + L4PolicyMap, error, +) { + if !ctx.rulesSelect { + if !r.getSelector().Matches(ctx.To) { + state.unSelectRule(ctx, ctx.To, r) + return nil, nil + } + } + + state.selectRule(ctx, r) + found, foundDeny := 0, 0 + + if len(r.Ingress) == 0 && len(r.IngressDeny) == 0 { + ctx.PolicyTrace(" No ingress rules\n") + } + for _, ingressRule := range r.Ingress { + fromEndpoints := ingressRule.GetSourceEndpointSelectorsWithRequirements(requirements) + cnt, err := mergeIngress(policyCtx, ctx, fromEndpoints, ingressRule.Authentication, ingressRule.ToPorts, ingressRule.ICMPs, r.Rule.Labels.DeepCopy(), result) + if err != nil { + return nil, err + } + if cnt > 0 { + found += cnt + } + } + + oldDeny := policyCtx.SetDeny(true) + defer func() { + policyCtx.SetDeny(oldDeny) + }() + for _, ingressRule := range r.IngressDeny { + fromEndpoints := ingressRule.GetSourceEndpointSelectorsWithRequirements(requirementsDeny) + cnt, err := mergeIngress(policyCtx, ctx, fromEndpoints, nil, ingressRule.ToPorts, ingressRule.ICMPs, r.Rule.Labels.DeepCopy(), result) + if err != nil { + return nil, err + } + if cnt > 0 { + foundDeny += cnt + } + } + + if found+foundDeny > 0 { + if found != 0 { + state.matchedRules++ + } + if foundDeny != 0 { + state.matchedDenyRules++ + } + return result, nil + } + + return nil, nil +} + +func (r *rule) matches(securityIdentity *identity.Identity) bool { + r.metadata.Mutex.Lock() + defer r.metadata.Mutex.Unlock() + var ruleMatches bool + + if ruleMatches, cached := r.metadata.IdentitySelected[securityIdentity.ID]; cached { + return ruleMatches + } + isNode := securityIdentity.ID == identity.ReservedIdentityHost + if (r.NodeSelector.LabelSelector != nil) != isNode { + r.metadata.IdentitySelected[securityIdentity.ID] = false + return ruleMatches + } + // Fall back to costly matching. + if ruleMatches = r.getSelector().Matches(securityIdentity.LabelArray); ruleMatches { + // Update cache so we don't have to do costly matching again. 
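+		// The cached entry is dropped again by ruleMetadata.delete() when the
+		// identity is released (see removeIdentityFromRuleCaches).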
+ r.metadata.IdentitySelected[securityIdentity.ID] = true + } else { + r.metadata.IdentitySelected[securityIdentity.ID] = false + } + + return ruleMatches +} + +// ****************** EGRESS POLICY ****************** + +func mergeEgress(policyCtx PolicyContext, ctx *SearchContext, toEndpoints api.EndpointSelectorSlice, auth *api.Authentication, toPorts, icmp api.PortsIterator, ruleLabels labels.LabelArray, resMap L4PolicyMap, fqdns api.FQDNSelectorSlice) (int, error) { + found := 0 + + if ctx.To != nil && len(toEndpoints) > 0 { + if ctx.TraceEnabled() { + traceL3(ctx, toEndpoints, "to", policyCtx.IsDeny()) + } + if !toEndpoints.Matches(ctx.To) { + ctx.PolicyTrace(" No label match for %s", ctx.To) + return 0, nil + } + ctx.PolicyTrace(" Found all required labels") + } + + var ( + cnt int + err error + ) + + // L3-only rule (with requirements folded into toEndpoints). + if toPorts.Len() == 0 && icmp.Len() == 0 && len(toEndpoints) > 0 { + cnt, err = mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, &api.PortRule{}, api.PortProtocol{Port: "0", Protocol: api.ProtoAny}, api.ProtoAny, ruleLabels, resMap, fqdns) + if err != nil { + return found, err + } + } + + found += cnt + + err = toPorts.Iterate(func(r api.Ports) error { + // For L4 Policy, an empty slice of EndpointSelector indicates that the + // rule allows all at L3 - explicitly specify this by creating a slice + // with the WildcardEndpointSelector. + if len(toEndpoints) == 0 { + toEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector} + } + if !policyCtx.IsDeny() { + ctx.PolicyTrace(" Allows port %v\n", r.GetPortProtocols()) + } else { + ctx.PolicyTrace(" Denies port %v\n", r.GetPortProtocols()) + } + + pr := r.GetPortRule() + if pr != nil { + if !pr.Rules.IsEmpty() { + for _, l7 := range pr.Rules.HTTP { + ctx.PolicyTrace(" %+v\n", l7) + } + for _, l7 := range pr.Rules.Kafka { + ctx.PolicyTrace(" %+v\n", l7) + } + for _, l7 := range pr.Rules.L7 { + ctx.PolicyTrace(" %+v\n", l7) + } + } + } + + for _, p := range r.GetPortProtocols() { + if p.Protocol.IsAny() { + cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, api.ProtoTCP, ruleLabels, resMap, fqdns) + if err != nil { + return err + } + found += cnt + + cnt, err = mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, api.ProtoUDP, ruleLabels, resMap, fqdns) + if err != nil { + return err + } + found += cnt + + cnt, err = mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, api.ProtoSCTP, ruleLabels, resMap, fqdns) + if err != nil { + return err + } + found += cnt + } else { + cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, p.Protocol, ruleLabels, resMap, fqdns) + if err != nil { + return err + } + found += cnt + } + } + return nil + }, + ) + if err != nil { + return found, err + } + + err = icmp.Iterate(func(r api.Ports) error { + if len(toEndpoints) == 0 { + toEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector} + } + if !policyCtx.IsDeny() { + ctx.PolicyTrace(" Allows ICMP type %v\n", r.GetPortProtocols()) + } else { + ctx.PolicyTrace(" Denies ICMP type %v\n", r.GetPortProtocols()) + } + + for _, p := range r.GetPortProtocols() { + cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, p.Protocol, ruleLabels, resMap, fqdns) + if err != nil { + return err + } + found += cnt + } + return nil + }) + + return found, err +} + +// mergeEgressPortProto merges all rules which share the same port & protocol that +// select a given set of endpoints. 
It updates the L4Filter mapped to by the specified +// port and protocol with the contents of the provided PortRule. If the rule +// being merged has conflicting L7 rules with those already in the provided +// L4PolicyMap for the specified port-protocol tuple, it returns an error. +func mergeEgressPortProto(policyCtx PolicyContext, ctx *SearchContext, endpoints api.EndpointSelectorSlice, auth *api.Authentication, r api.Ports, p api.PortProtocol, + proto api.L4Proto, ruleLabels labels.LabelArray, resMap L4PolicyMap, fqdns api.FQDNSelectorSlice) (int, error) { + // Create a new L4Filter + filterToMerge, err := createL4EgressFilter(policyCtx, endpoints, auth, r, p, proto, ruleLabels, fqdns) + if err != nil { + return 0, err + } + + err = addL4Filter(policyCtx, ctx, resMap, p, proto, filterToMerge, ruleLabels) + if err != nil { + return 0, err + } + return 1, err +} + +func (r *rule) resolveEgressPolicy( + policyCtx PolicyContext, + ctx *SearchContext, + state *traceState, + result L4PolicyMap, + requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement, +) ( + L4PolicyMap, error, +) { + if !ctx.rulesSelect { + if !r.getSelector().Matches(ctx.From) { + state.unSelectRule(ctx, ctx.From, r) + return nil, nil + } + } + + state.selectRule(ctx, r) + found, foundDeny := 0, 0 + + if len(r.Egress) == 0 && len(r.EgressDeny) == 0 { + ctx.PolicyTrace(" No egress rules\n") + } + for _, egressRule := range r.Egress { + toEndpoints := egressRule.GetDestinationEndpointSelectorsWithRequirements(requirements) + cnt, err := mergeEgress(policyCtx, ctx, toEndpoints, egressRule.Authentication, egressRule.ToPorts, egressRule.ICMPs, r.Rule.Labels.DeepCopy(), result, egressRule.ToFQDNs) + if err != nil { + return nil, err + } + if cnt > 0 { + found += cnt + } + } + + oldDeny := policyCtx.SetDeny(true) + defer func() { + policyCtx.SetDeny(oldDeny) + }() + for _, egressRule := range r.EgressDeny { + toEndpoints := egressRule.GetDestinationEndpointSelectorsWithRequirements(requirementsDeny) + cnt, err := mergeEgress(policyCtx, ctx, toEndpoints, nil, egressRule.ToPorts, egressRule.ICMPs, r.Rule.Labels.DeepCopy(), result, nil) + if err != nil { + return nil, err + } + if cnt > 0 { + foundDeny += cnt + } + } + + if found+foundDeny > 0 { + if found != 0 { + state.matchedRules++ + } + if foundDeny != 0 { + state.matchedDenyRules++ + } + return result, nil + } + + return nil, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/rules.go b/vendor/github.com/cilium/cilium/pkg/policy/rules.go new file mode 100644 index 0000000000..f5b0a22598 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/policy/rules.go @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package policy + +import ( + "fmt" + + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + policyapi "github.com/cilium/cilium/pkg/policy/api" +) + +// ruleSlice is a wrapper around a slice of *rule, which allows for functions +// to be written with []*rule as a receiver. +type ruleSlice []*rule + +func (rules ruleSlice) resolveL4IngressPolicy(policyCtx PolicyContext, ctx *SearchContext) (L4PolicyMap, error) { + result := L4PolicyMap{} + + ctx.PolicyTrace("\n") + ctx.PolicyTrace("Resolving ingress policy for %+v\n", ctx.To) + + state := traceState{} + var matchedRules ruleSlice + var requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement + + // Iterate over all FromRequires which select ctx.To. 
These requirements + // will be appended to each EndpointSelector's MatchExpressions in + // each FromEndpoints for all ingress rules. This ensures that FromRequires + // is taken into account when evaluating policy at L4. + for _, r := range rules { + if ctx.rulesSelect || r.getSelector().Matches(ctx.To) { + matchedRules = append(matchedRules, r) + for _, ingressRule := range r.Ingress { + for _, requirement := range ingressRule.FromRequires { + requirements = append(requirements, requirement.ConvertToLabelSelectorRequirementSlice()...) + } + } + for _, ingressRule := range r.IngressDeny { + for _, requirement := range ingressRule.FromRequires { + requirementsDeny = append(requirementsDeny, requirement.ConvertToLabelSelectorRequirementSlice()...) + } + } + } + } + + // Only dealing with matching rules from now on. Mark it in the ctx + oldRulesSelect := ctx.rulesSelect + ctx.rulesSelect = true + + for _, r := range matchedRules { + _, err := r.resolveIngressPolicy(policyCtx, ctx, &state, result, requirements, requirementsDeny) + if err != nil { + return nil, err + } + state.ruleID++ + } + + state.trace(len(rules), ctx) + + // Restore ctx in case caller uses it again. + ctx.rulesSelect = oldRulesSelect + + return result, nil +} + +func (rules ruleSlice) resolveL4EgressPolicy(policyCtx PolicyContext, ctx *SearchContext) (L4PolicyMap, error) { + result := L4PolicyMap{} + + ctx.PolicyTrace("\n") + ctx.PolicyTrace("Resolving egress policy for %+v\n", ctx.From) + + state := traceState{} + var matchedRules ruleSlice + var requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement + + // Iterate over all ToRequires which select ctx.To. These requirements will + // be appended to each EndpointSelector's MatchExpressions in each + // ToEndpoints for all egress rules. This ensures that ToRequires is + // taken into account when evaluating policy at L4. + for _, r := range rules { + if ctx.rulesSelect || r.getSelector().Matches(ctx.From) { + matchedRules = append(matchedRules, r) + for _, egressRule := range r.Egress { + for _, requirement := range egressRule.ToRequires { + requirements = append(requirements, requirement.ConvertToLabelSelectorRequirementSlice()...) + } + } + for _, egressRule := range r.EgressDeny { + for _, requirement := range egressRule.ToRequires { + requirementsDeny = append(requirementsDeny, requirement.ConvertToLabelSelectorRequirementSlice()...) + } + } + } + } + + // Only dealing with matching rules from now on. Mark it in the ctx + oldRulesSelect := ctx.rulesSelect + ctx.rulesSelect = true + + for i, r := range matchedRules { + state.ruleID = i + _, err := r.resolveEgressPolicy(policyCtx, ctx, &state, result, requirements, requirementsDeny) + if err != nil { + return nil, err + } + state.ruleID++ + } + + state.trace(len(rules), ctx) + + // Restore ctx in case caller uses it again. + ctx.rulesSelect = oldRulesSelect + + return result, nil +} + +// updateEndpointsCaches iterates over a given list of rules to update the cache +// within the rule which determines whether or not the given identity is +// selected by that rule. If a rule in the list does select said identity, it is +// added to epSet. Note that epSet can be shared across goroutines! +// Returns whether the endpoint was selected by one of the rules, or if the +// endpoint is nil. 
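
Both resolve functions above use the same two-pass structure: first walk every rule to collect the matching rules and the conjunctive requirements (FromRequires/ToRequires), then resolve only the matched rules with those requirements applied. A simplified standalone sketch of that first pass, with toy types standing in for the Cilium ones:

```go
package main

import "fmt"

// rule is a toy stand-in: each rule may carry extra requirements that
// must be folded into every other matching rule's selectors.
type rule struct {
	selects  bool
	requires []string
}

// gatherRequirements mirrors the first pass of the resolve functions:
// collect requirements from every rule that selects the target, so the
// second pass can apply them as a conjunction across all rules.
func gatherRequirements(rules []rule) (matched []rule, reqs []string) {
	for _, r := range rules {
		if r.selects {
			matched = append(matched, r)
			reqs = append(reqs, r.requires...)
		}
	}
	return matched, reqs
}

func main() {
	rules := []rule{
		{selects: true, requires: []string{"env=prod"}},
		{selects: true},
		{selects: false, requires: []string{"ignored"}},
	}
	matched, reqs := gatherRequirements(rules)
	fmt.Println(len(matched), reqs) // 2 [env=prod]
}
```

Collecting requirements even from rules whose own selector matched is what makes FromRequires/ToRequires conjunctive across the whole repository rather than local to one rule.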
+func (rules ruleSlice) updateEndpointsCaches(ep Endpoint) (bool, error) {
+	if ep == nil {
+		return false, fmt.Errorf("cannot update caches in rules because endpoint is nil")
+	}
+	id := ep.GetID16()
+	securityIdentity, err := ep.GetSecurityIdentity()
+	if err != nil {
+		return false, fmt.Errorf("cannot update caches in rules for endpoint %d because it is being deleted: %s", id, err)
+	}
+
+	if securityIdentity == nil {
+		return false, fmt.Errorf("cannot update caches in rules for endpoint %d because it has a nil identity", id)
+	}
+	endpointSelected := false
+	for _, r := range rules {
+		// NodeSelector can only match nodes, EndpointSelector only pods.
+		if (r.NodeSelector.LabelSelector != nil) != ep.IsHost() {
+			continue
+		}
+		// Update the matches cache of each rule, and note if
+		// the ep is selected by any of them.
+		if ruleMatches := r.matches(securityIdentity); ruleMatches {
+			endpointSelected = true
+		}
+	}
+
+	return endpointSelected, nil
+}
+
+// AsPolicyRules returns the internal policyapi.Rule objects as a policyapi.Rules object
+func (rules ruleSlice) AsPolicyRules() policyapi.Rules {
+	policyRules := make(policyapi.Rules, 0, len(rules))
+	for _, r := range rules {
+		policyRules = append(policyRules, &r.Rule)
+	}
+	return policyRules
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go b/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go
new file mode 100644
index 0000000000..1c2cb1fae1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+	"net"
+	"net/netip"
+	"sync"
+
+	"github.com/sirupsen/logrus"
+
+	"github.com/cilium/cilium/api/v1/models"
+	"github.com/cilium/cilium/pkg/identity"
+	"github.com/cilium/cilium/pkg/identity/cache"
+	k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
+	"github.com/cilium/cilium/pkg/labels"
+	"github.com/cilium/cilium/pkg/lock"
+	"github.com/cilium/cilium/pkg/logging/logfields"
+	"github.com/cilium/cilium/pkg/policy/api"
+)
+
+// scIdentity is the information we need about an identity that rules can select
+type scIdentity struct {
+	NID       identity.NumericIdentity
+	lbls      labels.LabelArray
+	nets      []*net.IPNet // Most specific CIDR for the identity, if any.
+	computed  bool         // nets has been computed
+	namespace string       // value of the namespace label, or ""
+}
+
+// scIdentityCache is a cache of Identities keyed by the numeric identity
+type scIdentityCache map[identity.NumericIdentity]scIdentity
+
+func newIdentity(nid identity.NumericIdentity, lbls labels.LabelArray) scIdentity {
+	return scIdentity{
+		NID:       nid,
+		lbls:      lbls,
+		nets:      getLocalScopeNets(nid, lbls),
+		namespace: lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
+		computed:  true,
+	}
+}
+
+// getLocalScopeNets returns the most specific CIDR for a local scope identity.
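
getLocalScopeNets below scans the identity's CIDR labels and keeps the one with the longest mask. The same most-specific-wins selection can be expressed with the newer net/netip package; a standalone sketch, not the vendored code:

```go
package main

import (
	"fmt"
	"net/netip"
)

// mostSpecific returns the prefix with the longest mask, i.e. the most
// specific CIDR, mirroring the selection logic in getLocalScopeNets.
func mostSpecific(cidrs []string) (best netip.Prefix, ok bool) {
	for _, c := range cidrs {
		p, err := netip.ParsePrefix(c)
		if err != nil {
			continue // ignore labels that are not valid CIDRs
		}
		if !ok || p.Bits() > best.Bits() {
			best, ok = p, true
		}
	}
	return best, ok
}

func main() {
	p, _ := mostSpecific([]string{"10.0.0.0/8", "10.1.2.0/24"})
	fmt.Println(p) // 10.1.2.0/24
}
```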
+func getLocalScopeNets(id identity.NumericIdentity, lbls labels.LabelArray) []*net.IPNet { + if id.HasLocalScope() { + var ( + maskSize int + mostSpecificCidr *net.IPNet + ) + for _, lbl := range lbls { + if lbl.Source == labels.LabelSourceCIDR { + _, netIP, err := net.ParseCIDR(lbl.Key) + if err == nil { + if ms, _ := netIP.Mask.Size(); ms > maskSize { + mostSpecificCidr = netIP + maskSize = ms + } + } + } + } + if mostSpecificCidr != nil { + return []*net.IPNet{mostSpecificCidr} + } + } + return nil +} + +func getIdentityCache(ids cache.IdentityCache) scIdentityCache { + idCache := make(map[identity.NumericIdentity]scIdentity, len(ids)) + for nid, lbls := range ids { + idCache[nid] = newIdentity(nid, lbls) + } + return idCache +} + +// userNotification stores the information needed to call +// IdentitySelectionUpdated callbacks to notify users of selector's +// identity changes. These are queued to be able to call the callbacks +// in FIFO order while not holding any locks. +type userNotification struct { + user CachedSelectionUser + selector CachedSelector + added []identity.NumericIdentity + deleted []identity.NumericIdentity + wg *sync.WaitGroup +} + +// SelectorCache caches identities, identity selectors, and the +// subsets of identities each selector selects. +type SelectorCache struct { + mutex lock.RWMutex + + // idAllocator is used to allocate and release identities. It is used + // by the NameManager to manage identities corresponding to FQDNs. + idAllocator cache.IdentityAllocator + + // idCache contains all known identities as informed by the + // kv-store and the local identity facility via our + // UpdateIdentities() function. + idCache scIdentityCache + + // map key is the string representation of the selector being cached. + selectors map[string]*identitySelector + + localIdentityNotifier identityNotifier + + // userCond is a condition variable for receiving signals + // about addition of new elements in userNotes + userCond *sync.Cond + // userMutex protects userNotes and is linked to userCond + userMutex lock.Mutex + // userNotes holds a FIFO list of user notifications to be made + userNotes []userNotification + + // used to lazily start the handler for user notifications. + startNotificationsHandlerOnce sync.Once +} + +// GetModel returns the API model of the SelectorCache. +func (sc *SelectorCache) GetModel() models.SelectorCache { + sc.mutex.RLock() + defer sc.mutex.RUnlock() + + selCacheMdl := make(models.SelectorCache, 0, len(sc.selectors)) + + for selector, idSel := range sc.selectors { + selections := idSel.GetSelections() + ids := make([]int64, 0, len(selections)) + for i := range selections { + ids = append(ids, int64(selections[i])) + } + selMdl := &models.SelectorIdentityMapping{ + Selector: selector, + Identities: ids, + Users: int64(idSel.numUsers()), + Labels: labelArrayToModel(idSel.GetMetadataLabels()), + } + selCacheMdl = append(selCacheMdl, selMdl) + } + + return selCacheMdl +} + +func labelArrayToModel(arr labels.LabelArray) models.LabelArray { + lbls := make(models.LabelArray, 0, len(arr)) + for _, l := range arr { + lbls = append(lbls, &models.Label{ + Key: l.Key, + Value: l.Value, + Source: l.Source, + }) + } + return lbls +} + +func (sc *SelectorCache) handleUserNotifications() { + for { + sc.userMutex.Lock() + for len(sc.userNotes) == 0 { + sc.userCond.Wait() + } + // get the current batch of notifications and release the lock so that SelectorCache + // can't block on userMutex while we call IdentitySelectionUpdated callbacks below. 
+ notifications := sc.userNotes + sc.userNotes = nil + sc.userMutex.Unlock() + + for _, n := range notifications { + n.user.IdentitySelectionUpdated(n.selector, n.added, n.deleted) + n.wg.Done() + } + } +} + +func (sc *SelectorCache) queueUserNotification(user CachedSelectionUser, selector CachedSelector, added, deleted []identity.NumericIdentity, wg *sync.WaitGroup) { + sc.startNotificationsHandlerOnce.Do(func() { + go sc.handleUserNotifications() + }) + wg.Add(1) + sc.userMutex.Lock() + sc.userNotes = append(sc.userNotes, userNotification{ + user: user, + selector: selector, + added: added, + deleted: deleted, + wg: wg, + }) + sc.userMutex.Unlock() + sc.userCond.Signal() +} + +// NewSelectorCache creates a new SelectorCache with the given identities. +func NewSelectorCache(allocator cache.IdentityAllocator, ids cache.IdentityCache) *SelectorCache { + sc := &SelectorCache{ + idAllocator: allocator, + idCache: getIdentityCache(ids), + selectors: make(map[string]*identitySelector), + } + sc.userCond = sync.NewCond(&sc.userMutex) + return sc +} + +// SetLocalIdentityNotifier injects the provided identityNotifier into the +// SelectorCache. Currently, this is used to inject the FQDN subsystem into +// the SelectorCache so the SelectorCache can notify the FQDN subsystem when +// it should be aware of a given FQDNSelector for which CIDR identities need +// to be provided upon DNS lookups which corespond to said FQDNSelector. +func (sc *SelectorCache) SetLocalIdentityNotifier(pop identityNotifier) { + sc.localIdentityNotifier = pop +} + +var ( + // Empty slice of numeric identities used for all selectors that select nothing + emptySelection identity.NumericIdentitySlice + // wildcardSelectorKey is used to compare if a key is for a wildcard + wildcardSelectorKey = api.WildcardEndpointSelector.LabelSelector.String() + // noneSelectorKey is used to compare if a key is for "reserved:none" + noneSelectorKey = api.EndpointSelectorNone.LabelSelector.String() +) + +// identityNotifier provides a means for other subsystems to be made aware of a +// given FQDNSelector (currently pkg/fqdn) so that said subsystems can notify +// the SelectorCache about new IPs (via CIDR Identities) which correspond to +// said FQDNSelector. This is necessary since there is nothing intrinsic to a +// CIDR Identity that says that it corresponds to a given FQDNSelector; this +// relationship is contained only via DNS responses, which are handled +// externally. +type identityNotifier interface { + // Lock must be held during any calls to *Locked functions below. + Lock() + + // Unlock must be called after calls to *Locked functions below. + Unlock() + + // RegisterForIPUpdatesLocked exposes this FQDNSelector so that identities + // for IPs contained in a DNS response that matches said selector can + // be propagated back to the SelectorCache via `UpdateFQDNSelector`. + // + // This function should only be called when the SelectorCache has been + // made aware of the FQDNSelector for the first time; subsequent + // updates to the selectors should be made via `UpdateFQDNSelector`. + // + // This function returns the set of IPs for which this selector already applies. + RegisterForIPUpdatesLocked(selector api.FQDNSelector) []netip.Addr + + // UnregisterForIPUpdatesLocked removes this FQDNSelector from the set of + // FQDNSelectors which are being tracked by the identityNotifier. 
The result
+ // of this is that no more updates for IPs which correspond to said selector
+ // are propagated back to the SelectorCache via `UpdateFQDNSelector`.
+ // This occurs when there are no more users of a given FQDNSelector for the
+ // SelectorCache.
+ UnregisterForIPUpdatesLocked(selector api.FQDNSelector)
+}
+
+//
+// CachedSelector implementation (== Public API)
+//
+// No locking needed.
+//
+
+// UpdateResult is a bitfield that indicates what parts of the policy engine
+// need to update in order to implement a policy
+type UpdateResult uint32
+
+const (
+ // UpdateResultUpdatePolicyMaps indicates the caller should call EndpointManager.UpdatePolicyMaps()
+ UpdateResultUpdatePolicyMaps = UpdateResult(1) << iota
+
+ // UpdateResultIdentitiesNeeded indicates the caller should wait for a complete ipcache label injection,
+ // as the selector is missing identities
+ UpdateResultIdentitiesNeeded
+
+ UpdateResultUnchanged = UpdateResult(0)
+)
+
+// UpdateFQDNSelector updates the mapping of fqdnKey (the FQDNSelector from a
+// policy rule as a string) to the provided list of IPs.
+//
+// If the supplied IPs are already known to the SelectorCache (i.e. they
+// already have identities allocated for them), the selector's
+// cachedSelections changes, and users are notified asynchronously. When this
+// is the case, the returned UpdateResult has the UpdateResultUpdatePolicyMaps
+// bit set, and the caller must call Wait() on the supplied sync.WaitGroup
+// before triggering any policy updates via UpdatePolicyMaps(). Policy updates
+// may need Endpoint locks, so this Wait() can deadlock if the caller is
+// holding any endpoint locks.
+//
+// In the case where identities are not found for all supplied IPs, the returned
+// result has the UpdateResultIdentitiesNeeded bit set to signify that an
+// ipcache update is needed.
+//
+// The caller must always ensure that all selected IP addresses are known to the ipcache.
+//
+// Returns an UpdateResult that indicates if the caller should call UpdatePolicyMaps() and/or
+// wait for ipcache UpsertMetadata() to finish.
+func (sc *SelectorCache) UpdateFQDNSelector(fqdnSelec api.FQDNSelector, ips []netip.Addr, wg *sync.WaitGroup) UpdateResult {
+ sc.mutex.Lock()
+ defer sc.mutex.Unlock()
+ return sc.updateFQDNSelector(fqdnSelec, ips, wg)
+}
+
+func (sc *SelectorCache) updateFQDNSelector(fqdnSelec api.FQDNSelector, ips []netip.Addr, wg *sync.WaitGroup) UpdateResult {
+ key := fqdnSelec.String()
+
+ idSelector, exists := sc.selectors[key]
+ if !exists || idSelector == nil {
+ log.WithField(logfields.Selector, fqdnSelec.String()).Error("UpdateFQDNSelector of selector not registered in SelectorCache!")
+ }
+
+ fqdnSelect, ok := idSelector.source.(*fqdnSelector)
+ if !ok {
+ log.Error("UpdateFQDNSelector for non-FQDN selector!")
+ return UpdateResultUnchanged
+ }
+
+ // update wantLabels, then determine set of added and removed identities
+ fqdnSelect.setSelectorIPs(ips) // this updates wantLabels
+
+ // Note that 'added' and 'deleted' are guaranteed to be disjoint:
+ // an identity must already be present in 'cachedSelections' to be
+ // appended to 'deleted', while its absence from 'cachedSelections'
+ // is a precondition for it to be appended to 'added'.
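+ // Two passes follow: the first drops entries from cachedSelections
+ // that no longer match the updated wantLabels, the second adds any
+ // known identity that now matches but was not selected before.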
+ var added, deleted []identity.NumericIdentity + + // Delete any non-matching entries from cachedSelections + for nid := range idSelector.cachedSelections { + identity := sc.idCache[nid] + if !idSelector.source.matches(identity) { + deleted = append(deleted, nid) + delete(idSelector.cachedSelections, nid) + } + } + + // Scan the cached set of IDs to determine any new matchers + for nid, identity := range sc.idCache { + if idSelector.source.matches(identity) { + if _, exists := idSelector.cachedSelections[nid]; !exists { + added = append(added, nid) + idSelector.cachedSelections[nid] = struct{}{} + } + } + } + + // Result indicates whether or not the caller should call UpdatePolicyMaps + // and / or wait for ipcache to finish. + result := UpdateResultUnchanged + + // If we're missing identities, then the caller also needs to wait for ipcache to do a + // allocation + injection round + // + // This assumes we should have a 1:1 mapping from label to IPs, which is currently the case. + // If this changes, this conditional will be wrong. + if len(idSelector.cachedSelections) != len(fqdnSelect.wantLabels) { + result |= UpdateResultIdentitiesNeeded + } + + idSelector.updateSelections() + + // If the set of selections has changed, then we need to push out an + // incremental update. This will add an incremental change to all users, + // and tell the caller that a call to UpdatePolicyMaps is required. + if len(added)+len(deleted) > 0 { + result |= UpdateResultUpdatePolicyMaps + idSelector.notifyUsers(sc, added, deleted, wg) // disjoint sets, see the comment above + } + + return result +} + +// AddFQDNSelector adds the given api.FQDNSelector in to the selector cache. If +// an identical EndpointSelector has already been cached, the corresponding +// CachedSelector is returned, otherwise one is created and added to the cache. +func (sc *SelectorCache) AddFQDNSelector(user CachedSelectionUser, lbls labels.LabelArray, fqdnSelec api.FQDNSelector) (cachedSelector CachedSelector, added bool) { + key := fqdnSelec.String() + + // Lock NameManager before the SelectorCache. Always. + // This is because SelectorCache and NameManager have interleaving locks, + // so we must always acquire the NameManager lock first + sc.localIdentityNotifier.Lock() + defer sc.localIdentityNotifier.Unlock() + sc.mutex.Lock() + defer sc.mutex.Unlock() + + // If the selector already exists, use it. + idSel, exists := sc.selectors[key] + if exists { + return idSel, idSel.addUser(user) + } + + source := &fqdnSelector{ + selector: fqdnSelec, + } + + // Make the FQDN subsystem aware of this selector and fetch ips + // that the FQDN subsystem is aware of. + currentIPs := sc.localIdentityNotifier.RegisterForIPUpdatesLocked(source.selector) + source.setSelectorIPs(currentIPs) + + return sc.addSelector(user, key, source) +} + +func (sc *SelectorCache) addSelector(user CachedSelectionUser, key string, source selectorSource) (CachedSelector, bool) { + idSel := &identitySelector{ + key: key, + users: make(map[CachedSelectionUser]struct{}), + cachedSelections: make(map[identity.NumericIdentity]struct{}), + source: source, + } + sc.selectors[key] = idSel + + // Scan the cached set of IDs to determine any new matchers + for nid, identity := range sc.idCache { + if idSel.source.matches(identity) { + idSel.cachedSelections[nid] = struct{}{} + } + } + + // Note: No notifications are sent for the existing + // identities. Caller must use GetSelections() to get the + // current selections after adding a selector. 
This way the
+ // behavior is the same between the two cases here (selector
+ // is already cached, or is a new one).
+
+ // Create the immutable slice representation of the selected
+ // numeric identities
+ idSel.updateSelections()
+
+ return idSel, idSel.addUser(user)
+}
+
+// FindCachedIdentitySelector finds the given api.EndpointSelector in the
+// selector cache, returning nil if one cannot be found.
+func (sc *SelectorCache) FindCachedIdentitySelector(selector api.EndpointSelector) CachedSelector {
+ key := selector.CachedString()
+ sc.mutex.Lock()
+ idSel := sc.selectors[key]
+ sc.mutex.Unlock()
+ return idSel
+}
+
+// AddIdentitySelector adds the given api.EndpointSelector into the
+// selector cache. If an identical EndpointSelector has already been
+// cached, the corresponding CachedSelector is returned, otherwise one
+// is created and added to the cache.
+func (sc *SelectorCache) AddIdentitySelector(user CachedSelectionUser, lbls labels.LabelArray, selector api.EndpointSelector) (cachedSelector CachedSelector, added bool) {
+ // The key returned here may be different for equivalent
+ // labelselectors, if the selector's requirements are stored
+ // in different orders. When this happens we'll be tracking
+ // essentially two copies of the same selector.
+ key := selector.CachedString()
+ sc.mutex.Lock()
+ defer sc.mutex.Unlock()
+ idSel, exists := sc.selectors[key]
+ if exists {
+ return idSel, idSel.addUser(user)
+ }
+
+ // Selectors are never modified once a rule is placed in the policy repository,
+ // so no need to deep copy.
+ source := &labelIdentitySelector{
+ selector: selector,
+ }
+ // check if the selector has a namespace match or requirement
+ if namespaces, ok := selector.GetMatch(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel); ok {
+ source.namespaces = namespaces
+ }
+
+ return sc.addSelector(user, key, source)
+}
+
+// lock must be held
+func (sc *SelectorCache) removeSelectorLocked(selector CachedSelector, user CachedSelectionUser) {
+ key := selector.String()
+ sel, exists := sc.selectors[key]
+ if exists {
+ if sel.removeUser(user) {
+ sel.source.remove(sc.localIdentityNotifier)
+ delete(sc.selectors, key)
+ }
+ }
+}
+
+// RemoveSelector removes CachedSelector for the user.
+func (sc *SelectorCache) RemoveSelector(selector CachedSelector, user CachedSelectionUser) {
+ sc.localIdentityNotifier.Lock()
+ sc.mutex.Lock()
+ sc.removeSelectorLocked(selector, user)
+ sc.mutex.Unlock()
+ sc.localIdentityNotifier.Unlock()
+}
+
+// RemoveSelectors removes CachedSelectorSlice for the user.
+func (sc *SelectorCache) RemoveSelectors(selectors CachedSelectorSlice, user CachedSelectionUser) {
+ sc.localIdentityNotifier.Lock()
+ sc.mutex.Lock()
+ for _, selector := range selectors {
+ sc.removeSelectorLocked(selector, user)
+ }
+ sc.mutex.Unlock()
+ sc.localIdentityNotifier.Unlock()
+}
+
+// ChangeUser changes the CachedSelectionUser that gets updates on the
+// cached selector.
+func (sc *SelectorCache) ChangeUser(selector CachedSelector, from, to CachedSelectionUser) {
+ key := selector.String()
+ sc.mutex.Lock()
+ idSel, exists := sc.selectors[key]
+ if exists {
+ // Add before remove so that the count does not dip to zero in between,
+ // as a zero count triggers FQDN unregistration (if applicable).
+ idSel.addUser(to) + // ignoring the return value as we have just added a user above + idSel.removeUser(from) + } + sc.mutex.Unlock() +} + +// UpdateIdentities propagates identity updates to selectors +// +// The caller is responsible for making sure the same identity is not +// present in both 'added' and 'deleted'. +// +// Caller should Wait() on the returned sync.WaitGroup before triggering any +// policy updates. Policy updates may need Endpoint locks, so this Wait() can +// deadlock if the caller is holding any endpoint locks. +func (sc *SelectorCache) UpdateIdentities(added, deleted cache.IdentityCache, wg *sync.WaitGroup) { + sc.mutex.Lock() + defer sc.mutex.Unlock() + + // Update idCache so that newly added selectors get + // prepopulated with all matching numeric identities. + for numericID := range deleted { + if old, exists := sc.idCache[numericID]; exists { + log.WithFields(logrus.Fields{ + logfields.Identity: numericID, + logfields.Labels: old.lbls, + }).Debug("UpdateIdentities: Deleting identity") + delete(sc.idCache, numericID) + } else { + log.WithFields(logrus.Fields{ + logfields.Identity: numericID, + }).Warning("UpdateIdentities: Skipping Delete of a non-existing identity") + delete(deleted, numericID) + } + } + for numericID, lbls := range added { + if old, exists := sc.idCache[numericID]; exists { + // Skip if no change. Not skipping if label + // order is different, but identity labels are + // sorted for the kv-store, so there should + // not be too many false negatives. + if lbls.Equals(old.lbls) { + log.WithFields(logrus.Fields{ + logfields.Identity: numericID, + }).Debug("UpdateIdentities: Skipping add of an existing identical identity") + delete(added, numericID) + continue + } + scopedLog := log.WithFields(logrus.Fields{ + logfields.Identity: numericID, + logfields.Labels: old.lbls, + logfields.Labels + "(new)": lbls}, + ) + msg := "UpdateIdentities: Updating an existing identity" + // Warn if any other ID has their labels change, besides local + // host. The local host can have its labels change at runtime if + // the kube-apiserver is running on the local host, see + // ipcache.TriggerLabelInjection(). + if numericID == identity.ReservedIdentityHost { + scopedLog.Debug(msg) + } else { + scopedLog.Warning(msg) + } + } else { + log.WithFields(logrus.Fields{ + logfields.Identity: numericID, + logfields.Labels: lbls, + }).Debug("UpdateIdentities: Adding a new identity") + } + sc.idCache[numericID] = newIdentity(numericID, lbls) + } + + if len(deleted)+len(added) > 0 { + // Iterate through all locally used identity selectors and + // update the cached numeric identities as required. + for _, idSel := range sc.selectors { + var adds, dels []identity.NumericIdentity + for numericID := range deleted { + if _, exists := idSel.cachedSelections[numericID]; exists { + dels = append(dels, numericID) + delete(idSel.cachedSelections, numericID) + } + } + for numericID := range added { + if _, exists := idSel.cachedSelections[numericID]; !exists { + if idSel.source.matches(sc.idCache[numericID]) { + adds = append(adds, numericID) + idSel.cachedSelections[numericID] = struct{}{} + } + } + } + if len(dels)+len(adds) > 0 { + idSel.updateSelections() + idSel.notifyUsers(sc, adds, dels, wg) + } + } + } +} + +// GetNetsLocked returns the most specific CIDR for an identity. For the "World" identity +// it returns both IPv4 and IPv6. 
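+// As the -Locked suffix suggests (an assumption based on the naming
+// convention used throughout this file), the caller is expected to hold the
+// SelectorCache mutex: the function reads idCache without locking on its own.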
+func (sc *SelectorCache) GetNetsLocked(id identity.NumericIdentity) []*net.IPNet {
+ ident, ok := sc.idCache[id]
+ if !ok {
+ return nil
+ }
+ if !ident.computed {
+ log.WithFields(logrus.Fields{
+ logfields.Identity: id,
+ logfields.Labels: ident.lbls,
+ }).Warning("GetNetsLocked: Identity with missing nets!")
+ }
+ return ident.nets
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/selectorcache_selector.go b/vendor/github.com/cilium/cilium/pkg/policy/selectorcache_selector.go
new file mode 100644
index 0000000000..6cecd669ef
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/selectorcache_selector.go
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "bytes"
+ "encoding/json"
+ "net/netip"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/policy/api"
+)
+
+// CachedSelector represents an identity selector owned by the selector cache
+type CachedSelector interface {
+ // GetSelections returns the cached set of numeric identities
+ // selected by the CachedSelector. The returned slice must NOT
+ // be modified, as it is shared among multiple users.
+ GetSelections() identity.NumericIdentitySlice
+
+ // GetMetadataLabels returns metadata labels for additional context
+ // surrounding the selector. These are typically the labels associated with
+ // Cilium rules.
+ GetMetadataLabels() labels.LabelArray
+
+ // Selects returns 'true' if the CachedSelector selects the given
+ // numeric identity.
+ Selects(nid identity.NumericIdentity) bool
+
+ // IsWildcard returns true if the endpoint selector selects
+ // all endpoints.
+ IsWildcard() bool
+
+ // IsNone returns true if the selector never selects anything
+ IsNone() bool
+
+ // String returns the string representation of this selector.
+ // Used as a map key.
+ String() string
+}
+
+// CachedSelectorSlice is a slice of CachedSelectors that can be sorted.
+type CachedSelectorSlice []CachedSelector
+
+// MarshalJSON returns the CachedSelectors as JSON formatted buffer
+func (s CachedSelectorSlice) MarshalJSON() ([]byte, error) {
+ buffer := bytes.NewBufferString("[")
+ for i, selector := range s {
+ buf, err := json.Marshal(selector.String())
+ if err != nil {
+ return nil, err
+ }
+
+ buffer.Write(buf)
+ if i < len(s)-1 {
+ buffer.WriteString(",")
+ }
+ }
+ buffer.WriteString("]")
+ return buffer.Bytes(), nil
+}
+
+func (s CachedSelectorSlice) Len() int { return len(s) }
+func (s CachedSelectorSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func (s CachedSelectorSlice) Less(i, j int) bool {
+ return strings.Compare(s[i].String(), s[j].String()) < 0
+}
+
+// SelectsAllEndpoints returns whether the CachedSelectorSlice selects all
+// endpoints, which is true if the wildcard endpoint selector is present in the
+// slice.
+func (s CachedSelectorSlice) SelectsAllEndpoints() bool {
+ for _, selector := range s {
+ if selector.IsWildcard() {
+ return true
+ }
+ }
+ return false
+}
+
+// CachedSelectionUser inserts selectors into the cache and gets update
+// callbacks whenever the set of selected numeric identities changes for
+// the CachedSelectors pushed by it.
+type CachedSelectionUser interface {
+ // IdentitySelectionUpdated implementations MUST NOT call back
+ // to the name manager or the selector cache while executing this function!
+ //
+ // The caller is responsible for making sure the same identity is not
+ // present in both 'added' and 'deleted'.
+ IdentitySelectionUpdated(selector CachedSelector, added, deleted []identity.NumericIdentity)
+}
+
+// identitySelector is the internal type for all selectors in the
+// selector cache.
+//
+// identitySelector represents the mapping of an EndpointSelector
+// to a slice of identities. These mappings are updated via two
+// different processes:
+//
+// 1. When policy rules are changed these are added and/or deleted
+// depending on what selectors the rules contain. Cached selections of
+// new identitySelectors are pre-populated from the set of currently
+// known identities.
+//
+// 2. When reachable identities appear or disappear, either via local
+// allocation (CIDRs), or via the KV-store (remote endpoints). In this
+// case all existing identitySelectors are walked through and their
+// cached selections are updated as necessary.
+//
+// In both of the above cases the set of existing identitySelectors is
+// write locked.
+//
+// To minimize upkeep, the identity selectors are shared across
+// all IdentityPolicies, so that only one copy exists for each
+// identitySelector. Users of the SelectorCache take care of creating
+// identitySelectors as needed by identity policies. The set of
+// identitySelectors is read locked during an IdentityPolicy update so
+// that the policy is always updated using a coherent set of
+// cached selections.
+//
+// identitySelector is used as a map key, so it must not be implemented by a
+// map, slice, or a func, or a runtime panic will be triggered. In all
+// cases below identitySelector is being implemented by structs.
+//
+// identitySelector is used in the policy engine as a map key,
+// so it must always be given to the user as a pointer to the actual type.
+// (The public methods only expose the CachedSelector interface.)
+type identitySelector struct {
+ source selectorSource
+ key string
+ selections atomic.Pointer[identity.NumericIdentitySlice]
+ users map[CachedSelectionUser]struct{}
+ cachedSelections map[identity.NumericIdentity]struct{}
+ metadataLbls labels.LabelArray
+}
+
+// identitySelector implements CachedSelector
+var _ CachedSelector = (*identitySelector)(nil)
+
+type selectorSource interface {
+ matches(scIdentity) bool
+
+ remove(identityNotifier)
+}
+
+// fqdnSelector is implemented as an updatable bag-of-labels. Any identity that matches
+// any of the labels in wantLabels is selected. Unlike the identitySelector, this selector
+// is "mutable" in that the FQDN subsystem may update the set of matched labels arbitrarily.
+type fqdnSelector struct {
+ selector api.FQDNSelector
+ wantLabels labels.LabelArray // MUST be sorted
+}
+
+func (f *fqdnSelector) remove(dnsProxy identityNotifier) {
+ dnsProxy.UnregisterForIPUpdatesLocked(f.selector)
+}
+
+// setSelectorIPs updates the set of desired labels associated with this selector.
+// lock must be held
+func (f *fqdnSelector) setSelectorIPs(ips []netip.Addr) {
+ lbls := make(labels.LabelArray, 0, len(ips))
+ for _, ip := range ips {
+ l, err := labels.IPStringToLabel(ip.String())
+ if err != nil {
+ // not possible
+ continue
+ }
+ lbls = append(lbls, l)
+ }
+ lbls.Sort()
+ f.wantLabels = lbls
+}
+
+// matches returns true if the identity contains at least one label
+// that is in wantLabels.
+// This is reasonably efficient, as it relies on both arrays being sorted.
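+// For example (illustrative labels), with wantLabels [cidr:1.1.1.1/32] and an
+// identity whose sorted labels contain cidr:1.1.1.1/32, the walk below stops
+// at the equal pair and returns true.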
+func (f *fqdnSelector) matches(identity scIdentity) bool { + wantIdx := 0 + checkIdx := 0 + + // Both arrays are sorted; walk through until we get a match + for wantIdx < len(f.wantLabels) && checkIdx < len(identity.lbls) { + want := f.wantLabels[wantIdx] + check := identity.lbls[checkIdx] + if want == check { + return true + } + + // Not equal, bump + if check.Key < want.Key { + checkIdx++ + } else { + wantIdx++ + } + } + + return false +} + +type labelIdentitySelector struct { + selector api.EndpointSelector + namespaces []string // allowed namespaces, or "" +} + +// xxxMatches returns true if the CachedSelector matches given labels. +// This is slow, but only used for policy tracing, so it's OK. +func (l *labelIdentitySelector) xxxMatches(labels labels.LabelArray) bool { + return l.selector.Matches(labels) +} + +func (l *labelIdentitySelector) matchesNamespace(ns string) bool { + if len(l.namespaces) > 0 { + if ns != "" { + for i := range l.namespaces { + if ns == l.namespaces[i] { + return true + } + } + } + // namespace required, but no match + return false + } + // no namespace required, match + return true +} + +func (l *labelIdentitySelector) matches(identity scIdentity) bool { + return l.matchesNamespace(identity.namespace) && l.selector.Matches(identity.lbls) +} + +func (l *labelIdentitySelector) remove(_ identityNotifier) { + // only useful for fqdn selectors +} + +// lock must be held +// +// The caller is responsible for making sure the same identity is not +// present in both 'added' and 'deleted'. +func (i *identitySelector) notifyUsers(sc *SelectorCache, added, deleted []identity.NumericIdentity, wg *sync.WaitGroup) { + for user := range i.users { + // pass 'f' to the user as '*fqdnSelector' + sc.queueUserNotification(user, i, added, deleted, wg) + } +} + +// Equal is used by checker.Equals, and only considers the identity of the selector, +// ignoring the internal state! +func (i *identitySelector) Equal(b *identitySelector) bool { + return i.key == b.key +} + +// +// CachedSelector implementation (== Public API) +// +// No locking needed. +// + +// GetSelections returns the set of numeric identities currently +// selected. The cached selections can be concurrently updated. In +// that case GetSelections() will return either the old or new version +// of the selections. If the old version is returned, the user is +// guaranteed to receive a notification including the update. +func (i *identitySelector) GetSelections() identity.NumericIdentitySlice { + selections := i.selections.Load() + if selections == nil { + return emptySelection + } + return *selections +} + +func (i *identitySelector) GetMetadataLabels() labels.LabelArray { + return i.metadataLbls +} + +// Selects return 'true' if the CachedSelector selects the given +// numeric identity. +func (i *identitySelector) Selects(nid identity.NumericIdentity) bool { + if i.IsWildcard() { + return true + } + nids := i.GetSelections() + idx := sort.Search(len(nids), func(i int) bool { return nids[i] >= nid }) + return idx < len(nids) && nids[idx] == nid +} + +// IsWildcard returns true if the endpoint selector selects all +// endpoints. +func (i *identitySelector) IsWildcard() bool { + return i.key == wildcardSelectorKey +} + +// IsNone returns true if the endpoint selector never selects anything. 
+func (i *identitySelector) IsNone() bool { + return i.key == noneSelectorKey +} + +// String returns the map key for this selector +func (i *identitySelector) String() string { + return i.key +} + +// +// identitySelector implementation (== internal API) +// + +// lock must be held +func (i *identitySelector) addUser(user CachedSelectionUser) (added bool) { + if _, exists := i.users[user]; exists { + return false + } + i.users[user] = struct{}{} + return true +} + +// locks must be held for the dnsProxy and the SelectorCache (if the selector is a FQDN selector) +func (i *identitySelector) removeUser(user CachedSelectionUser) (last bool) { + delete(i.users, user) + return len(i.users) == 0 +} + +// lock must be held +func (i *identitySelector) numUsers() int { + return len(i.users) +} + +// updateSelections updates the immutable slice representation of the +// cached selections after the cached selections have been changed. +// +// lock must be held +func (i *identitySelector) updateSelections() { + selections := make(identity.NumericIdentitySlice, len(i.cachedSelections)) + idx := 0 + for nid := range i.cachedSelections { + selections[idx] = nid + idx++ + } + // Sort the numeric identities so that the map iteration order + // does not matter. This makes testing easier, but may help + // identifying changes easier also otherwise. + sort.Slice(selections, func(i, j int) bool { + return selections[i] < selections[j] + }) + i.setSelections(&selections) +} + +func (i *identitySelector) setSelections(selections *identity.NumericIdentitySlice) { + if len(*selections) > 0 { + i.selections.Store(selections) + } else { + i.selections.Store(&emptySelection) + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/doc.go b/vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/doc.go new file mode 100644 index 0000000000..8255781887 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/doc.go @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// package trafficdirection specifies the directionality of policy in a +// numeric representation. +package trafficdirection diff --git a/vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/trafficdirection.go b/vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/trafficdirection.go new file mode 100644 index 0000000000..9b530cfc50 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/trafficdirection.go @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package trafficdirection + +// TrafficDirection specifies the directionality of policy (ingress or egress). +type TrafficDirection uint8 + +const ( + // Invalid represents an invalid traffic direction. + Invalid TrafficDirection = 2 + + // Egress represents egress traffic. + Egress TrafficDirection = 1 + + // Ingress represents ingress traffic. + Ingress TrafficDirection = 0 +) + +// Uint8 normalizes the TrafficDirection for insertion into BPF maps. 
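+// For example, Ingress.Uint8() yields 0 and Egress.Uint8() yields 1,
+// matching the constant values defined above.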
+func (td TrafficDirection) Uint8() uint8 { + return uint8(td) +} + +func (td TrafficDirection) String() string { + if td == Egress { + return "Egress" + } else if td == Ingress { + return "Ingress" + } + + return "Unknown" +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/trigger.go b/vendor/github.com/cilium/cilium/pkg/policy/trigger.go new file mode 100644 index 0000000000..bec4139b43 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/policy/trigger.go @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package policy + +import ( + "strings" + "sync" + + "github.com/cilium/cilium/pkg/endpoint/regeneration" + "github.com/cilium/cilium/pkg/metrics" + "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/time" + "github.com/cilium/cilium/pkg/trigger" +) + +// TriggerPolicyUpdates triggers the policy update trigger. +// +// To follow what the trigger does, see NewUpdater. +func (u *Updater) TriggerPolicyUpdates(force bool, reason string) { + if force { + log.Debugf("Artificially increasing policy revision to enforce policy recalculation") + u.repo.BumpRevision() + } + + u.TriggerWithReason(reason) +} + +// NewUpdater returns a new Updater instance to handle triggering policy +// updates ready for use. +func NewUpdater(r *Repository, regen regenerator) (*Updater, error) { + t, err := trigger.NewTrigger(trigger.Parameters{ + Name: "policy_update", + MetricsObserver: &TriggerMetrics{}, + MinInterval: option.Config.PolicyTriggerInterval, + // Triggers policy updates for every local endpoint. + // This may be called in a variety of situations: after policy changes, + // changes in agent configuration, changes in endpoint labels, and + // change of security identities. + TriggerFunc: func(reasons []string) { + log.Debug("Regenerating all endpoints") + reason := strings.Join(reasons, ", ") + + regenerationMetadata := ®eneration.ExternalRegenerationMetadata{ + Reason: reason, + RegenerationLevel: regeneration.RegenerateWithoutDatapath, + } + regen.RegenerateAllEndpoints(regenerationMetadata) + }, + }) + if err != nil { + return nil, err + } + return &Updater{ + Trigger: t, + repo: r, + }, nil +} + +// Updater is responsible for triggering policy updates, in order to perform +// policy recalculation. +type Updater struct { + *trigger.Trigger + + repo *Repository +} + +type regenerator interface { + // RegenerateAllEndpoints should trigger a regeneration of all endpoints. + RegenerateAllEndpoints(*regeneration.ExternalRegenerationMetadata) *sync.WaitGroup +} + +// TriggerMetrics handles the metrics for trigger policy recalculations. 
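+// It is wired into trigger.NewTrigger as the MetricsObserver in NewUpdater
+// above; the trigger library is expected to invoke QueueEvent for every
+// queued trigger reason and PostRun after each (possibly folded) run.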
+type TriggerMetrics struct{}
+
+func (p *TriggerMetrics) QueueEvent(reason string) {
+ if metrics.TriggerPolicyUpdateTotal.IsEnabled() {
+ metrics.TriggerPolicyUpdateTotal.WithLabelValues(reason).Inc()
+ }
+}
+
+func (p *TriggerMetrics) PostRun(duration, latency time.Duration, folds int) {
+ if metrics.TriggerPolicyUpdateCallDuration.IsEnabled() {
+ metrics.TriggerPolicyUpdateCallDuration.WithLabelValues("duration").Observe(duration.Seconds())
+ metrics.TriggerPolicyUpdateCallDuration.WithLabelValues("latency").Observe(latency.Seconds())
+ }
+ if metrics.TriggerPolicyUpdateFolds.IsEnabled() {
+ metrics.TriggerPolicyUpdateFolds.Set(float64(folds))
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/utils.go b/vendor/github.com/cilium/cilium/pkg/policy/utils.go
new file mode 100644
index 0000000000..fa3c485903
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/utils.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import "github.com/cilium/cilium/pkg/labels"
+
+// JoinPath returns a joined path from a and b.
+func JoinPath(a, b string) string {
+ return a + labels.PathDelimiter + b
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/visibility.go b/vendor/github.com/cilium/cilium/pkg/policy/visibility.go
new file mode 100644
index 0000000000..2348644dab
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/visibility.go
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/cilium/pkg/policy/api"
+ "github.com/cilium/cilium/pkg/u8proto"
+)
+
+var (
+ singleAnnotationRegex = "<(Ingress|Egress)/([1-9][0-9]{1,5})/(TCP|UDP|SCTP|ANY)/([A-Za-z]{3,32})>"
+ annotationRegex = regexp.MustCompile(fmt.Sprintf(`^((%s)(,(%s))*)$`, singleAnnotationRegex, singleAnnotationRegex))
+)
+
+func validateL7ProtocolWithDirection(dir string, proto L7ParserType) error {
+ switch proto {
+ case ParserTypeHTTP:
+ return nil
+ case ParserTypeDNS:
+ if dir == "Egress" {
+ return nil
+ }
+ case ParserTypeKafka:
+ return nil
+ default:
+ return fmt.Errorf("unsupported parser type %s", proto)
+ }
+ return fmt.Errorf("%s not allowed with direction %s", proto, dir)
+}
+
+// NewVisibilityPolicy generates the VisibilityPolicy that is encoded in the
+// annotation parameter.
+// Returns an error:
+// - if the annotation does not correspond to the expected
+// format for a visibility annotation.
+// - if there is a conflict between the state encoded in the annotation (e.g.,
+// different L7 protocols for the same L4 port / protocol / traffic direction).
+func NewVisibilityPolicy(anno string) (*VisibilityPolicy, error) {
+ if !annotationRegex.MatchString(anno) {
+ return nil, fmt.Errorf("annotation for proxy visibility did not match expected format %s", annotationRegex.String())
+ }
+
+ nvp := &VisibilityPolicy{
+ Ingress: make(DirectionalVisibilityPolicy),
+ Egress: make(DirectionalVisibilityPolicy),
+ }
+
+ // TODO: look into using regex groups.
+ anSplit := strings.Split(anno, ",")
+ for i := range anSplit {
+ proxyAnnoSplit := strings.Split(anSplit[i], "/")
+ if len(proxyAnnoSplit) != 4 {
+ err := fmt.Errorf("invalid number of fields (%d) in annotation", len(proxyAnnoSplit))
+ return nil, err
+ }
+ // Ingress|Egress
+ // Don't need to validate the content itself, regex already did that.
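+ // For example, the annotation element "<Ingress/80/TCP/HTTP>" splits
+ // into ["<Ingress", "80", "TCP", "HTTP>"]; the leading '<' is stripped
+ // from the direction below and the trailing '>' from the L7 protocol.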
+ direction := proxyAnnoSplit[0][1:]
+ port := proxyAnnoSplit[1]
+
+ portInt, err := strconv.ParseUint(port, 10, 16)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse port: %s", err)
+ }
+
+ // Don't need to validate, regex already did that.
+ l4Proto := proxyAnnoSplit[2]
+ u8Prot, err := u8proto.ParseProtocol(l4Proto)
+ if err != nil {
+ return nil, fmt.Errorf("invalid L4 protocol %s", l4Proto)
+ }
+
+ // ANY equates to TCP, UDP, and SCTP in the datapath; the datapath itself
+ // does not support the 'ANY' protocol paired with a port at L4.
+ var protos []u8proto.U8proto
+ if u8Prot == u8proto.ANY {
+ protos = append(protos, u8proto.TCP)
+ protos = append(protos, u8proto.UDP)
+ protos = append(protos, u8proto.SCTP)
+ } else {
+ protos = append(protos, u8Prot)
+ }
+ // Remove trailing '>'.
+ l7Protocol := L7ParserType(strings.ToLower(proxyAnnoSplit[3][:len(proxyAnnoSplit[3])-1]))
+
+ if err := validateL7ProtocolWithDirection(direction, l7Protocol); err != nil {
+ return nil, err
+ }
+
+ var dvp DirectionalVisibilityPolicy
+ var ingress bool
+ if direction == "Ingress" {
+ dvp = nvp.Ingress
+ ingress = true
+ } else {
+ dvp = nvp.Egress
+ ingress = false
+ }
+
+ for _, prot := range protos {
+ pp := strconv.FormatUint(portInt, 10) + "/" + prot.String()
+ if res, ok := dvp[pp]; ok {
+ if res.Parser != l7Protocol {
+ return nil, fmt.Errorf("duplicate annotations with different L7 protocols %s and %s for %s", res.Parser, l7Protocol, pp)
+ }
+ }
+
+ l7Meta := generateL7AllowAllRules(l7Protocol)
+
+ dvp[pp] = &VisibilityMetadata{
+ Parser: l7Protocol,
+ Port: uint16(portInt),
+ Proto: prot,
+ Ingress: ingress,
+ L7Metadata: l7Meta,
+ }
+ }
+ }
+
+ return nvp, nil
+}
+
+func generateL7AllowAllRules(parser L7ParserType) L7DataMap {
+ var m L7DataMap
+ switch parser {
+ case ParserTypeDNS:
+ m = L7DataMap{}
+ // Create an entry to explicitly allow all at L7 for DNS.
+ emptyL3Selector := &identitySelector{source: &labelIdentitySelector{selector: api.WildcardEndpointSelector}, key: wildcardSelectorKey}
+ m[emptyL3Selector] = &PerSelectorPolicy{
+ L7Rules: api.L7Rules{
+ DNS: []api.PortRuleDNS{
+ {
+ MatchPattern: "*",
+ },
+ },
+ },
+ }
+ }
+ return m
+}
+
+// VisibilityMetadata encodes state about what type of traffic should be
+// redirected to an L7Proxy. Implements the ProxyPolicy interface.
+// TODO: an L4Filter could be composed of this type.
+type VisibilityMetadata struct {
+ // Parser represents the proxy to which traffic should be redirected.
+ Parser L7ParserType
+
+ // Port, in tandem with Proto, signifies the L4 port for which traffic
+ // should be redirected.
+ Port uint16
+
+ // Proto, in tandem with Port, signifies the L4 protocol for which traffic
+ // should be redirected.
+ Proto u8proto.U8proto
+
+ // Ingress specifies whether ingress traffic at the given L4 port / protocol
+ // should be redirected to the proxy.
+ Ingress bool
+
+ // L7Metadata encodes optional information about what is allowed at L7 for
+ // visibility. Some specific protocol parsers do not need this set to allow
+ // traffic (e.g., HTTP), but some do (e.g., DNS).
+ L7Metadata L7DataMap
+}
+
+// DirectionalVisibilityPolicy is a mapping of VisibilityMetadata keyed by
+// L4 Port / L4 Protocol (e.g., 80/TCP) for a given traffic direction (e.g.,
+// ingress or egress). This encodes at which L4 Port / L4 Protocol traffic
+// should be redirected to a given L7 proxy. An empty instance of this type
+// indicates that no traffic should be redirected.
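+// For example (hypothetical entry), redirecting ingress HTTP traffic on TCP
+// port 80 would be encoded as the key "80/TCP" mapping to a
+// VisibilityMetadata with Parser ParserTypeHTTP, Port 80, Proto u8proto.TCP,
+// and Ingress true.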
+type DirectionalVisibilityPolicy map[string]*VisibilityMetadata

+// VisibilityPolicy represents for both ingress and egress which types of
+// traffic should be redirected to a given L7 proxy.
+type VisibilityPolicy struct {
+ Ingress DirectionalVisibilityPolicy
+ Egress DirectionalVisibilityPolicy
+ Error error
+}
+
+// CopyL7RulesPerEndpoint returns a shallow copy of the L7Metadata of this
+// VisibilityMetadata.
+func (v *VisibilityMetadata) CopyL7RulesPerEndpoint() L7DataMap {
+ if v.L7Metadata != nil {
+ return v.L7Metadata.ShallowCopy()
+ }
+ return nil
+}
+
+// GetL7Parser returns the L7ParserType for this VisibilityMetadata.
+func (v *VisibilityMetadata) GetL7Parser() L7ParserType {
+ return v.Parser
+}
+
+// GetIngress returns whether the VisibilityMetadata applies at ingress or
+// egress.
+func (v *VisibilityMetadata) GetIngress() bool {
+ return v.Ingress
+}
+
+// GetPort returns the port at which the VisibilityMetadata applies.
+func (v *VisibilityMetadata) GetPort() uint16 {
+ return v.Port
+}
+
+// GetListener returns the optional listener name.
+func (v *VisibilityMetadata) GetListener() string {
+ return ""
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/proxy/accesslog/record.go b/vendor/github.com/cilium/cilium/pkg/proxy/accesslog/record.go
new file mode 100644
index 0000000000..1044fc67e0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/proxy/accesslog/record.go
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package accesslog
+
+import (
+ "net"
+ "net/http"
+ "net/url"
+)
+
+// FlowType is the type to indicate the flow direction
+type FlowType string
+
+const (
+ // TypeRequest is a request message
+ TypeRequest FlowType = "Request"
+
+ // TypeResponse is a response to a request
+ TypeResponse FlowType = "Response"
+
+ // TypeSample is a packet sample
+ TypeSample FlowType = "Sample"
+)
+
+// FlowVerdict is the verdict passed on the flow
+type FlowVerdict string
+
+const (
+ // VerdictForwarded indicates that the flow was forwarded
+ VerdictForwarded FlowVerdict = "Forwarded"
+
+ // VerdictDenied indicates that the flow was denied
+ VerdictDenied = "Denied"
+
+ // VerdictError indicates that there was an error processing the flow
+ VerdictError = "Error"
+
+ // VerdictRedirected indicates that the flow was redirected through the proxy
+ VerdictRedirected = "Redirected"
+)
+
+// ObservationPoint is the type used to describe point of observation
+type ObservationPoint string
+
+const (
+ // Ingress indicates event was generated at ingress
+ Ingress ObservationPoint = "Ingress"
+
+ // Egress indicates event was generated at egress
+ Egress ObservationPoint = "Egress"
+)
+
+// IPVersion indicates the flow's IP version
+type IPVersion uint8
+
+const (
+ // VersionIPv4 indicates IPv4
+ VersionIPv4 IPVersion = iota
+ // VersionIPV6 indicates IPv6
+ VersionIPV6
+)
+
+// EndpointInfo contains information about the sending (resp. receiving) endpoint.
+// If the field using this struct is SourceEndpoint, all fields correspond to
+// the sending endpoint; if the field using this struct is DestinationEndpoint,
+// then all fields correspond to the receiving endpoint.
+type EndpointInfo struct {
+ // ID is the endpoint id
+ ID uint64
+
+ // IPv4 is the IPv4 address of the endpoint
+ IPv4 string
+
+ // IPv6 is the IPv6 address of the endpoint
+ IPv6 string
+
+ // Port represents the source port for SourceEndpoint and the
+ // destination port for DestinationEndpoint
+ Port uint16
+
+ // Identity is the security identity of the endpoint
+ Identity uint64
+
+ // Labels is the list of security relevant labels of the endpoint
+ Labels []string
+}
+
+// ServiceInfo contains information about the Kubernetes service
+type ServiceInfo struct {
+ // Name specifies the name of the service
+ Name string
+
+ // IPPort is the IP and transport port of the service
+ IPPort IPPort
+}
+
+// FlowEvent identifies the event type of an L4 log record
+type FlowEvent string
+
+const (
+ // FlowAdded means that this is a new flow
+ FlowAdded FlowEvent = "FlowAdded"
+
+ // FlowRemoved means that a flow has been deleted
+ FlowRemoved FlowEvent = "FlowRemoved"
+)
+
+// DropReason indicates the reason why the flow was dropped
+type DropReason uint16
+
+// TransportProtocol defines layer 4 protocols
+type TransportProtocol uint16
+
+// NodeAddressInfo holds addressing information of the node the agent runs on
+type NodeAddressInfo struct {
+ IPv4 string
+ IPv6 string
+}
+
+// IPPort bundles an IP address and port number
+type IPPort struct {
+ IP string
+ Port uint16
+}
+
+// LogRecord is the structure used to log individual request/response
+// processing events or sampled packets
+type LogRecord struct {
+ // Type is the type of the flow
+ Type FlowType
+
+ // Timestamp is the start of a request, the end of a response, or the time the packet has been sampled,
+ // depending on the flow type
+ Timestamp string
+
+ // NodeAddressInfo contains the IPs of the node where the event was generated
+ NodeAddressInfo NodeAddressInfo
+
+ // ObservationPoint indicates where the flow was observed
+ ObservationPoint ObservationPoint
+
+ // SourceEndpoint is information about the source endpoint, if available
+ SourceEndpoint EndpointInfo
+
+ // DestinationEndpoint is information about the destination endpoint, if available
+ DestinationEndpoint EndpointInfo
+
+ // IPVersion indicates the version of the IP protocol in use
+ IPVersion IPVersion
+
+ // Verdict is the verdict taken on the flow
+ Verdict FlowVerdict
+
+ // Info includes information about the rule that matched or the error
+ // that occurred
+ Info string
+
+ // Metadata is additional arbitrary metadata
+ Metadata []string
+
+ // TransportProtocol identifies the flow's transport layer (layer 4) protocol
+ TransportProtocol TransportProtocol
+
+ // FlowEvent identifies the flow event for an L4 log record
+ FlowEvent FlowEvent
+
+ // ServiceInfo identifies the Kubernetes service this flow went through. It is set to
+ // nil if the flow did not go through any service. Note that this field is always set to
+ // nil if ObservationPoint is Ingress since currently Cilium cannot tell at ingress
+ // whether the packet went through a service before.
+ ServiceInfo *ServiceInfo
+
+ // DropReason indicates the reason for the drop. This field is set if and only if
+ // the Verdict field is set to VerdictDenied. Otherwise it's set to nil.
+ DropReason *DropReason
+
+ // The following are the protocol specific parts. Only one of the
+ // following should ever be set.
Unused fields will be omitted + + // HTTP contains information for HTTP request/responses + HTTP *LogRecordHTTP `json:"HTTP,omitempty"` + + // Kafka contains information for Kafka request/responses + Kafka *LogRecordKafka `json:"Kafka,omitempty"` + + // DNS contains information for DNS request/responses + DNS *LogRecordDNS `json:"DNS,omitempty"` + + // L7 contains information about generic L7 protocols + L7 *LogRecordL7 `json:"L7,omitempty"` +} + +// LogRecordHTTP contains the HTTP specific portion of a log record +type LogRecordHTTP struct { + // Code is the HTTP code being returned + Code int + + // Method is the method of the request + Method string + + // URL is the URL of the request + URL *url.URL + + // Protocol is the HTTP protocol in use + Protocol string + + // Headers are all HTTP headers present in the request and response. Request records + // contain request headers, while response headers contain response headers and the + // 'x-request-id' from the request headers, if any. If response headers already contain + // a 'x-request-id' with a different value then both will be included as two separate + // entries with the same key. + Headers http.Header + + // MissingHeaders are HTTP request headers that were deemed missing from the request + MissingHeaders http.Header + + // RejectedHeaders are HTTP request headers that were rejected from the request + RejectedHeaders http.Header +} + +// KafkaTopic contains the topic for requests +type KafkaTopic struct { + Topic string `json:"Topic,omitempty"` +} + +// LogRecordKafka contains the Kafka-specific portion of a log record +type LogRecordKafka struct { + // ErrorCode is the Kafka error code being returned + ErrorCode int + + // APIVersion of the Kafka api used + APIVersion int16 + + // APIKey for Kafka message + // Reference: https://kafka.apache.org/protocol#protocol_api_keys + APIKey string + + // CorrelationID is a user-supplied integer value that will be passed + // back with the response + CorrelationID int32 + + // Topic of the request, currently is a single topic + // Note that this string can be empty since not all messages use + // Topic. example: LeaveGroup, Heartbeat + Topic KafkaTopic +} + +type DNSDataSource string + +const ( + // DNSSourceProxy indicates that the DNS record was created by a proxy + // intercepting a DNS request/response. + DNSSourceProxy DNSDataSource = "proxy" +) + +// LogRecordDNS contains the DNS specific portion of a log record +type LogRecordDNS struct { + // Query is the name in the original query + Query string `json:"Query,omitempty"` + + // IPs are any IPs seen in this response. + // This field is filled only for DNS responses with IPs. + IPs []net.IP `json:"IPs,omitempty"` + + // TTL is the lowest applicable TTL for this data + // This field is filled only for DNS responses. + TTL uint32 `json:"TTL,omitempty"` + + // CNAMEs are any CNAME records seen in the response leading from Query + // to the IPs. + // This field is filled only for DNS responses with CNAMEs to IP data. + CNAMEs []string `json:"CNAMEs,omitempty"` + + // ObservationSource represents the source of the data in this LogRecordDNS. + // Empty or undefined may indicate older cilium versions, as it is expected + // to be filled in. 
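+ // (For example, DNSSourceProxy above marks records produced by the
+ // DNS proxy.)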
+ ObservationSource DNSDataSource `json:"ObservationSource,omitempty"` + + // RCode is the response code + // defined as per https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-6 + // Use github.com/cilium/dns.RcodeToString map to retrieve string representation + RCode int `json:"RCode,omitempty"` + + // QTypes are question types in DNS message + // https://www.ietf.org/rfc/rfc1035.txt + // Use github.com/cilium/dns.TypeToString map to retrieve string representation + QTypes []uint16 `json:"QTypes,omitempty"` + + // AnswerTypes are record types in the answer section + // https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-4 + // Use github.com/cilium/dns.TypeToString map to retrieve string representation + AnswerTypes []uint16 `json:"AnswerTypes,omitempty"` +} + +// LogRecordL7 contains the generic L7 portion of a log record +type LogRecordL7 struct { + // Proto is the name of the protocol this record represents + Proto string `json:"Proto,omitempty"` + + // Fields is a map of key-value pairs describing the protocol + Fields map[string]string +} diff --git a/vendor/github.com/cilium/cilium/pkg/rand/safe_rand.go b/vendor/github.com/cilium/cilium/pkg/rand/safe_rand.go new file mode 100644 index 0000000000..9829484a18 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/rand/safe_rand.go @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package rand + +import ( + "math/rand" + + "github.com/cilium/cilium/pkg/lock" +) + +// SafeRand is a concurrency-safe source of pseudo-random numbers. The Go +// stdlib's math/rand.Source is not concurrency-safe. The global source in +// math/rand would be concurrency safe (due to its internal use of +// lockedSource), but it is prone to inter-package interference with the PRNG +// state. 
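+//
+// A minimal usage sketch (illustrative seed value):
+//
+// r := NewSafeRand(42)
+// n := r.Intn(100) // safe to call from multiple goroutines
+//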
+// Also see https://github.com/cilium/cilium/issues/10988
+type SafeRand struct {
+ mu lock.Mutex
+ r *rand.Rand
+}
+
+func NewSafeRand(seed int64) *SafeRand {
+ return &SafeRand{r: rand.New(rand.NewSource(seed))}
+}
+
+func (sr *SafeRand) Seed(seed int64) {
+ sr.mu.Lock()
+ sr.r.Seed(seed)
+ sr.mu.Unlock()
+}
+
+func (sr *SafeRand) Int63() int64 {
+ sr.mu.Lock()
+ v := sr.r.Int63()
+ sr.mu.Unlock()
+ return v
+}
+
+func (sr *SafeRand) Int63n(n int64) int64 {
+ sr.mu.Lock()
+ v := sr.r.Int63n(n)
+ sr.mu.Unlock()
+ return v
+}
+
+func (sr *SafeRand) Uint32() uint32 {
+ sr.mu.Lock()
+ v := sr.r.Uint32()
+ sr.mu.Unlock()
+ return v
+}
+
+func (sr *SafeRand) Uint64() uint64 {
+ sr.mu.Lock()
+ v := sr.r.Uint64()
+ sr.mu.Unlock()
+ return v
+}
+
+func (sr *SafeRand) Intn(n int) int {
+ sr.mu.Lock()
+ v := sr.r.Intn(n)
+ sr.mu.Unlock()
+ return v
+}
+
+func (sr *SafeRand) Float64() float64 {
+ sr.mu.Lock()
+ v := sr.r.Float64()
+ sr.mu.Unlock()
+ return v
+}
+
+func (sr *SafeRand) Perm(n int) []int {
+ sr.mu.Lock()
+ v := sr.r.Perm(n)
+ sr.mu.Unlock()
+ return v
+}
+
+func (sr *SafeRand) Shuffle(n int, swap func(i, j int)) {
+ sr.mu.Lock()
+ sr.r.Shuffle(n, swap)
+ sr.mu.Unlock()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/rate/api_limiter.go b/vendor/github.com/cilium/cilium/pkg/rate/api_limiter.go
new file mode 100644
index 0000000000..956a4b66e6
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/rate/api_limiter.go
@@ -0,0 +1,897 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package rate
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/google/uuid"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sync/semaphore"
+ "golang.org/x/time/rate"
+
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/time"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "rate")
+
+const (
+ defaultMeanOver = 10
+ defaultDelayedAdjustmentFactor = 0.50
+ defaultMaxAdjustmentFactor = 100.0
+
+ // waitSemaphoreResolution is the maximum resolution of the wait
+ // semaphore; the higher this value, the more accurately the
+ // ParallelRequests requirement is enforced
+ waitSemaphoreResolution = 10000000
+
+ // logUUID is the UUID of the request.
+ logUUID = "uuid"
+ // logAPICallName is the name of the underlying API call, such as
+ // "endpoint-create".
+ logAPICallName = "name"
+ // logProcessingDuration is the time taken to perform the actual underlying
+ // API call such as creating an endpoint or deleting an endpoint. This is
+ // the time from when the request has finished waiting (or being
+ // delayed) to when the underlying action has finished.
+ logProcessingDuration = "processingDuration"
+ // logParallelRequests is the number of allowed parallel requests. See
+ // APILimiter.parallelRequests.
+ logParallelRequests = "parallelRequests"
+ // logMinWaitDuration represents APILimiterParameters.MinWaitDuration.
+ logMinWaitDuration = "minWaitDuration"
+ // logMaxWaitDuration represents APILimiterParameters.MaxWaitDuration.
+ logMaxWaitDuration = "maxWaitDuration"
+ // logMaxWaitDurationLimiter is the actual / calculated maximum threshold
+ // for a request to wait. Any request exceeding this threshold will not be
+ // processed.
+ logMaxWaitDurationLimiter = "maxWaitDurationLimiter"
+ // logWaitDurationLimit is the actual / calculated amount of time
+ // determined by the underlying rate-limiting library that this request
+ // must wait before the rate limiter releases it, so that it can take the
+ // underlying action. See golang.org/x/time/rate.(*Reservation).Delay().
+ logWaitDurationLimit = "waitDurationLimiter"
+ // logWaitDurationTotal is the actual total amount of time that this
+ // request spent waiting to be released by the rate limiter.
+ logWaitDurationTotal = "waitDurationTotal"
+ // logLimit is the rate limit. See APILimiterParameters.RateLimit.
+ logLimit = "limit"
+ // logBurst is the burst rate. See APILimiterParameters.RateBurst.
+ logBurst = "burst"
+ // logTotalDuration is the total time between when the request was first
+ // scheduled (entered the rate limiter) to when it completed processing of
+ // the underlying action. This is the absolute total time of the request
+ // from beginning to end.
+ logTotalDuration = "totalDuration"
+ // logSkipped represents whether the rate limiter will skip rate-limiting
+ // this request. See APILimiterParameters.SkipInitial.
+ logSkipped = "rateLimiterSkipped"
+)
+
+type outcome string
+
+const (
+ outcomeParallelMaxWait outcome = "fail-parallel-wait"
+ outcomeLimitMaxWait outcome = "fail-limit-wait"
+ outcomeReqCancelled outcome = "request-cancelled"
+)
+
+// APILimiter is an extension to x/time/rate.Limiter specifically for Cilium
+// API calls. It automatically adjusts the rate, burst, and maximum
+// parallel API calls to stay as close as possible to an estimated processing
+// time.
+type APILimiter struct {
+ // name is the name of the API call. This field is immutable after
+ // NewAPILimiter()
+ name string
+
+ // params is the parameters of the limiter. This field is immutable
+ // after NewAPILimiter()
+ params APILimiterParameters
+
+ // metrics points to the metrics implementation provided by the caller
+ // of the APILimiter. This field is immutable after NewAPILimiter()
+ metrics MetricsObserver
+
+ // mutex protects all fields below this line
+ mutex lock.RWMutex
+
+ // meanProcessingDuration is the latest mean processing duration,
+ // calculated based on processingDurations
+ meanProcessingDuration float64
+
+ // processingDurations is the last params.MeanOver processing durations
+ processingDurations []time.Duration
+
+ // meanWaitDuration is the latest mean wait duration, calculated based
+ // on waitDurations
+ meanWaitDuration float64
+
+ // waitDurations is the last params.MeanOver wait durations
+ waitDurations []time.Duration
+
+ // parallelRequests is the currently allowed maximum parallel
+ // requests. This defaults to params.ParallelRequests and is then
+ // adjusted automatically if params.AutoAdjust is enabled.
+ parallelRequests int
+
+ // adjustmentFactor is the latest adjustment factor. It is the ratio
+ // between params.EstimatedProcessingDuration and
+ // meanProcessingDuration.
+ adjustmentFactor float64
+
+ // limiter is the rate limiter based on params.RateLimit and
+ // params.RateBurst.
+ limiter *rate.Limiter
+
+ // currentRequestsInFlight is the number of parallel API requests
+ // currently in flight
+ currentRequestsInFlight int
+
+ // requestsProcessed is the total number of processed requests
+ requestsProcessed int64
+
+ // requestsScheduled is the total number of scheduled requests
+ requestsScheduled int64
+
+ // parallelWaitSemaphore is the semaphore used to implement the
+ // parallel requests limit. It is initialized with a capacity of
+ // waitSemaphoreResolution and each API request will acquire
+ // waitSemaphoreResolution/parallelRequests tokens.
+ parallelWaitSemaphore *semaphore.Weighted
+}
+
+// APILimiterParameters is the configuration of an APILimiter. The structure
+// may not be mutated after it has been passed into NewAPILimiter().
+type APILimiterParameters struct {
+ // EstimatedProcessingDuration is the estimated duration an API call
+ // will take. This value is used if AutoAdjust is enabled to
+ // automatically adjust rate limits to stay as close as possible to the
+ // estimated processing duration.
+ EstimatedProcessingDuration time.Duration
+
+ // AutoAdjust enables automatic adjustment of the values
+ // ParallelRequests, RateLimit, and RateBurst in order to keep the
+ // mean processing duration close to EstimatedProcessingDuration
+ AutoAdjust bool
+
+ // MeanOver is the number of entries to keep in order to calculate the
+ // mean processing and wait duration
+ MeanOver int
+
+ // ParallelRequests is the parallel requests allowed. If AutoAdjust is
+ // enabled, the value will adjust automatically.
+ ParallelRequests int
+
+ // MaxParallelRequests is the maximum parallel requests allowed. If
+ // AutoAdjust is enabled, then ParallelRequests will never grow
+ // above MaxParallelRequests.
+ MaxParallelRequests int
+
+ // MinParallelRequests is the minimum parallel requests allowed. If
+ // AutoAdjust is enabled, then ParallelRequests will never fall
+ // below MinParallelRequests.
+ MinParallelRequests int
+
+ // RateLimit is the initial number of API requests allowed per second.
+ // If AutoAdjust is enabled, the value will adjust automatically.
+ RateLimit rate.Limit
+
+ // RateBurst is the initial allowed burst of API requests. If
+ // AutoAdjust is enabled, the value will adjust automatically.
+ RateBurst int
+
+ // MinWaitDuration is the minimum time an API request always has to
+ // wait before the Wait() function returns.
+ MinWaitDuration time.Duration
+
+ // MaxWaitDuration is the maximum time an API request is allowed to
+ // wait before the Wait() function returns an error.
+ MaxWaitDuration time.Duration
+
+ // Log enables info logging of processed API requests. This should only
+ // be used for low frequency API calls.
+ Log bool
+
+ // DelayedAdjustmentFactor is the percentage of the AdjustmentFactor to
+ // be applied to RateBurst and MaxWaitDuration, defined as a value
+ // between 0.0..1.0. This is used to steer a slower reaction of the
+ // RateBurst and ParallelRequests compared to RateLimit.
+ DelayedAdjustmentFactor float64
+
+ // SkipInitial is the number of initial API calls for which to not
+ // apply any rate limiting. This is useful to define a learning phase
+ // in the beginning to allow for auto adjustment before imposing wait
+ // durations and rate limiting on API calls.
+ SkipInitial int
+
+ // MaxAdjustmentFactor is the maximum adjustment factor when AutoAdjust
+ // is enabled. Base values will not adjust more than by this factor.
+ MaxAdjustmentFactor float64 +} + +// MergeUserConfig merges the provided user configuration into the existing +// parameters and returns a new copy. +func (p APILimiterParameters) MergeUserConfig(config string) (APILimiterParameters, error) { + if err := (&p).mergeUserConfig(config); err != nil { + return APILimiterParameters{}, err + } + + return p, nil +} + +// NewAPILimiter returns a new APILimiter based on the parameters and metrics implementation +func NewAPILimiter(name string, p APILimiterParameters, metrics MetricsObserver) *APILimiter { + if p.MeanOver == 0 { + p.MeanOver = defaultMeanOver + } + + if p.MinParallelRequests == 0 { + p.MinParallelRequests = 1 + } + + if p.RateBurst == 0 { + p.RateBurst = 1 + } + + if p.DelayedAdjustmentFactor == 0.0 { + p.DelayedAdjustmentFactor = defaultDelayedAdjustmentFactor + } + + if p.MaxAdjustmentFactor == 0.0 { + p.MaxAdjustmentFactor = defaultMaxAdjustmentFactor + } + + l := &APILimiter{ + name: name, + params: p, + parallelRequests: p.ParallelRequests, + parallelWaitSemaphore: semaphore.NewWeighted(waitSemaphoreResolution), + metrics: metrics, + } + + if p.RateLimit != 0 { + l.limiter = rate.NewLimiter(p.RateLimit, p.RateBurst) + } + + return l +} + +// NewAPILimiterFromConfig returns a new APILimiter based on user configuration +func NewAPILimiterFromConfig(name, config string, metrics MetricsObserver) (*APILimiter, error) { + p := &APILimiterParameters{} + + if err := p.mergeUserConfig(config); err != nil { + return nil, err + } + + return NewAPILimiter(name, *p, metrics), nil +} + +func (p *APILimiterParameters) mergeUserConfigKeyValue(key, value string) error { + switch strings.ToLower(key) { + case "rate-limit": + limit, err := parseRate(value) + if err != nil { + return fmt.Errorf("unable to parse rate %q: %w", value, err) + } + p.RateLimit = limit + case "rate-burst": + burst, err := parsePositiveInt(value) + if err != nil { + return err + } + p.RateBurst = burst + case "min-wait-duration": + minWaitDuration, err := time.ParseDuration(value) + if err != nil { + return fmt.Errorf("unable to parse duration %q: %w", value, err) + } + p.MinWaitDuration = minWaitDuration + case "max-wait-duration": + maxWaitDuration, err := time.ParseDuration(value) + if err != nil { + return fmt.Errorf("unable to parse duration %q: %w", value, err) + } + p.MaxWaitDuration = maxWaitDuration + case "estimated-processing-duration": + estProcessingDuration, err := time.ParseDuration(value) + if err != nil { + return fmt.Errorf("unable to parse duration %q: %w", value, err) + } + p.EstimatedProcessingDuration = estProcessingDuration + case "auto-adjust": + v, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("unable to parse bool %q: %w", value, err) + } + p.AutoAdjust = v + case "parallel-requests": + parallel, err := parsePositiveInt(value) + if err != nil { + return err + } + p.ParallelRequests = parallel + case "min-parallel-requests": + minParallel, err := parsePositiveInt(value) + if err != nil { + return err + } + p.MinParallelRequests = minParallel + case "max-parallel-requests": + maxParallel, err := parsePositiveInt(value) + if err != nil { + return err + } + p.MaxParallelRequests = int(maxParallel) + case "mean-over": + meanOver, err := parsePositiveInt(value) + if err != nil { + return err + } + p.MeanOver = meanOver + case "log": + v, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("unable to parse bool %q: %w", value, err) + } + p.Log = v + case "delayed-adjustment-factor": + delayedAdjustmentFactor, 
err := strconv.ParseFloat(value, 64) + if err != nil { + return fmt.Errorf("unable to parse float %q: %w", value, err) + } + p.DelayedAdjustmentFactor = delayedAdjustmentFactor + case "max-adjustment-factor": + maxAdjustmentFactor, err := strconv.ParseFloat(value, 64) + if err != nil { + return fmt.Errorf("unable to parse float %q: %w", value, err) + } + p.MaxAdjustmentFactor = maxAdjustmentFactor + case "skip-initial": + skipInitial, err := parsePositiveInt(value) + if err != nil { + return err + } + p.SkipInitial = skipInitial + default: + return fmt.Errorf("unknown rate limiting option %q", key) + } + + return nil +} + +func (p *APILimiterParameters) mergeUserConfig(config string) error { + tokens := strings.Split(config, ",") + for _, token := range tokens { + if token == "" { + continue + } + + t := strings.SplitN(token, ":", 2) + if len(t) != 2 { + return fmt.Errorf("unable to parse rate limit option %q, must in the form name=option:value[,option:value]", token) + } + + if err := p.mergeUserConfigKeyValue(t[0], t[1]); err != nil { + return fmt.Errorf("unable to parse rate limit option %q with value %q: %w", t[0], t[1], err) + } + } + + return nil +} + +func (l *APILimiter) Parameters() APILimiterParameters { + return l.params +} + +func (l *APILimiter) delayedAdjustment(current, min, max float64) (n float64) { + n = current * l.adjustmentFactor + n = current + ((n - current) * l.params.DelayedAdjustmentFactor) + if min > 0.0 && n < min { + n = min + } + if max > 0.0 && n > max { + n = max + } + return +} + +func (l *APILimiter) calculateAdjustmentFactor() float64 { + f := l.params.EstimatedProcessingDuration.Seconds() / l.meanProcessingDuration + if f > l.params.MaxAdjustmentFactor { + f = l.params.MaxAdjustmentFactor + } + if f < 1.0/l.params.MaxAdjustmentFactor { + f = 1.0 / l.params.MaxAdjustmentFactor + } + return f +} + +func (l *APILimiter) adjustmentLimit(newValue, initialValue float64) float64 { + return math.Max(initialValue/l.params.MaxAdjustmentFactor, math.Min(initialValue*l.params.MaxAdjustmentFactor, newValue)) +} + +func (l *APILimiter) adjustedBurst() int { + newBurst := l.delayedAdjustment(float64(l.params.RateBurst), float64(l.params.MinParallelRequests), 0.0) + return int(math.Round(l.adjustmentLimit(newBurst, float64(l.params.RateBurst)))) +} + +func (l *APILimiter) adjustedLimit() rate.Limit { + newLimit := rate.Limit(float64(l.params.RateLimit) * l.adjustmentFactor) + return rate.Limit(l.adjustmentLimit(float64(newLimit), float64(l.params.RateLimit))) +} + +func (l *APILimiter) adjustedParallelRequests() int { + newParallelRequests := l.delayedAdjustment(float64(l.params.ParallelRequests), + float64(l.params.MinParallelRequests), float64(l.params.MaxParallelRequests)) + return int(l.adjustmentLimit(newParallelRequests, float64(l.params.ParallelRequests))) +} + +func (l *APILimiter) requestFinished(r *limitedRequest, err error) { + if r.finished { + return + } + + r.finished = true + + var processingDuration time.Duration + if !r.startTime.IsZero() { + processingDuration = time.Since(r.startTime) + } + + totalDuration := time.Since(r.scheduleTime) + + scopedLog := log.WithFields(logrus.Fields{ + logAPICallName: l.name, + logUUID: r.uuid, + logProcessingDuration: processingDuration, + logTotalDuration: totalDuration, + logWaitDurationTotal: r.waitDuration, + }) + + if err != nil { + scopedLog = scopedLog.WithError(err) + } + + if l.params.Log { + scopedLog.Info("API call has been processed") + } else { + scopedLog.Debug("API call has been processed") + } + + if 
r.waitSemaphoreWeight != 0 { + l.parallelWaitSemaphore.Release(r.waitSemaphoreWeight) + } + + l.mutex.Lock() + + if !r.startTime.IsZero() { + l.requestsProcessed++ + l.currentRequestsInFlight-- + } + + // Only auto-adjust ratelimiter using metrics from successful API requests + if err == nil { + l.processingDurations = append(l.processingDurations, processingDuration) + if exceed := len(l.processingDurations) - l.params.MeanOver; exceed > 0 { + l.processingDurations = l.processingDurations[exceed:] + } + l.meanProcessingDuration = calcMeanDuration(l.processingDurations) + + l.waitDurations = append(l.waitDurations, r.waitDuration) + if exceed := len(l.waitDurations) - l.params.MeanOver; exceed > 0 { + l.waitDurations = l.waitDurations[exceed:] + } + l.meanWaitDuration = calcMeanDuration(l.waitDurations) + + if l.params.AutoAdjust && l.params.EstimatedProcessingDuration != 0 { + l.adjustmentFactor = l.calculateAdjustmentFactor() + l.parallelRequests = l.adjustedParallelRequests() + + if l.limiter != nil { + l.limiter.SetLimit(l.adjustedLimit()) + + newBurst := l.adjustedBurst() + l.limiter.SetBurst(newBurst) + } + } + } + + values := MetricsValues{ + EstimatedProcessingDuration: l.params.EstimatedProcessingDuration.Seconds(), + WaitDuration: r.waitDuration, + MaxWaitDuration: l.params.MaxWaitDuration, + MinWaitDuration: l.params.MinWaitDuration, + MeanProcessingDuration: l.meanProcessingDuration, + MeanWaitDuration: l.meanWaitDuration, + ParallelRequests: l.parallelRequests, + CurrentRequestsInFlight: l.currentRequestsInFlight, + AdjustmentFactor: l.adjustmentFactor, + Error: err, + Outcome: string(r.outcome), + } + + if l.limiter != nil { + values.Limit = l.limiter.Limit() + values.Burst = l.limiter.Burst() + } + l.mutex.Unlock() + + if l.metrics != nil { + l.metrics.ProcessedRequest(l.name, values) + } +} + +// calcMeanDuration returns the mean duration in seconds +func calcMeanDuration(durations []time.Duration) float64 { + total := 0.0 + for _, t := range durations { + total += t.Seconds() + } + return total / float64(len(durations)) +} + +// LimitedRequest represents a request that is being limited. It is returned +// by Wait() and the caller of Wait() is responsible to call Done() or Error() +// when the API call has been processed or resulted in an error. It is safe to +// call Error() and then Done(). It is not safe to call Done(), Error(), or +// WaitDuration() concurrently. +type LimitedRequest interface { + Done() + Error(err error) + WaitDuration() time.Duration +} + +type limitedRequest struct { + limiter *APILimiter + startTime time.Time + scheduleTime time.Time + waitDuration time.Duration + waitSemaphoreWeight int64 + uuid string + finished bool + outcome outcome +} + +// WaitDuration returns the duration the request had to wait +func (l *limitedRequest) WaitDuration() time.Duration { + return l.waitDuration +} + +// Done must be called when the API request has been successfully processed +func (l *limitedRequest) Done() { + l.limiter.requestFinished(l, nil) +} + +// Error must be called when the API request resulted in an error +func (l *limitedRequest) Error(err error) { + l.limiter.requestFinished(l, err) +} + +// Wait blocks until the next API call is allowed to be processed. If the +// configured MaxWaitDuration is exceeded, an error is returned. On success, a +// LimitedRequest is returned on which Done() must be called when the API call +// has completed or Error() if an error occurred. 
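+//
+// A minimal usage sketch (illustrative only; the call name and parameter
+// values below are hypothetical, not part of this change):
+//
+//	limiter := NewAPILimiter("endpoint-create", APILimiterParameters{
+//		RateLimit:        10.0, // requests per second
+//		RateBurst:        4,
+//		ParallelRequests: 2,
+//		MaxWaitDuration:  15 * time.Second,
+//	}, nil)
+//
+//	req, err := limiter.Wait(ctx)
+//	if err != nil {
+//		return err // rejected or cancelled by the rate limiter
+//	}
+//	// ... perform the underlying API call ...
+//	req.Done()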
+func (l *APILimiter) Wait(ctx context.Context) (LimitedRequest, error) { + req, err := l.wait(ctx) + if err != nil { + l.requestFinished(req, err) + return nil, err + } + return req, nil +} + +// wait implements the API rate limiting delaying functionality. Every error +// message and corresponding log message are documented in +// Documentation/configuration/api-rate-limiting.rst. If any changes related to +// errors or log messages are made to this function, please update the +// aforementioned page as well. +func (l *APILimiter) wait(ctx context.Context) (req *limitedRequest, err error) { + var ( + limitWaitDuration time.Duration + r *rate.Reservation + ) + + req = &limitedRequest{ + limiter: l, + scheduleTime: time.Now(), + uuid: uuid.New().String(), + } + + l.mutex.Lock() + + l.requestsScheduled++ + + scopedLog := log.WithFields(logrus.Fields{ + logAPICallName: l.name, + logUUID: req.uuid, + logParallelRequests: l.parallelRequests, + }) + + if l.params.MaxWaitDuration > 0 { + scopedLog = scopedLog.WithField(logMaxWaitDuration, l.params.MaxWaitDuration) + } + + if l.params.MinWaitDuration > 0 { + scopedLog = scopedLog.WithField(logMinWaitDuration, l.params.MinWaitDuration) + } + + select { + case <-ctx.Done(): + if l.params.Log { + scopedLog.Warning("Not processing API request due to cancelled context") + } + l.mutex.Unlock() + req.outcome = outcomeReqCancelled + err = fmt.Errorf("request cancelled while waiting for rate limiting slot: %w", ctx.Err()) + return + default: + } + + skip := l.params.SkipInitial > 0 && l.requestsScheduled <= int64(l.params.SkipInitial) + if skip { + scopedLog = scopedLog.WithField(logSkipped, skip) + } + + parallelRequests := l.parallelRequests + meanProcessingDuration := l.meanProcessingDuration + l.mutex.Unlock() + + if l.params.Log { + scopedLog.Info("Processing API request with rate limiter") + } else { + scopedLog.Debug("Processing API request with rate limiter") + } + + if skip { + goto skipRateLimiter + } + + if parallelRequests > 0 { + waitCtx := ctx + if l.params.MaxWaitDuration > 0 { + ctx2, cancel := context.WithTimeout(ctx, l.params.MaxWaitDuration) + defer cancel() + waitCtx = ctx2 + } + w := int64(waitSemaphoreResolution / parallelRequests) + err2 := l.parallelWaitSemaphore.Acquire(waitCtx, w) + if err2 != nil { + if l.params.Log { + scopedLog.WithError(err2).Warning("Not processing API request. Wait duration for maximum parallel requests exceeds maximum") + } + req.outcome = outcomeParallelMaxWait + err = fmt.Errorf("timed out while waiting to be served with %d parallel requests: %w", parallelRequests, err2) + return + } + req.waitSemaphoreWeight = w + } + req.waitDuration = time.Since(req.scheduleTime) + + l.mutex.Lock() + if l.limiter != nil { + r = l.limiter.Reserve() + limitWaitDuration = r.Delay() + + scopedLog = scopedLog.WithFields(logrus.Fields{ + logLimit: fmt.Sprintf("%.2f/s", l.limiter.Limit()), + logBurst: l.limiter.Burst(), + logWaitDurationLimit: limitWaitDuration, + logMaxWaitDurationLimiter: l.params.MaxWaitDuration - req.waitDuration, + }) + } + l.mutex.Unlock() + + if l.params.MinWaitDuration > 0 && limitWaitDuration < l.params.MinWaitDuration { + limitWaitDuration = l.params.MinWaitDuration + } + + if (l.params.MaxWaitDuration > 0 && (limitWaitDuration+req.waitDuration) > l.params.MaxWaitDuration) || limitWaitDuration == rate.InfDuration { + if l.params.Log { + scopedLog.Warning("Not processing API request. 
Wait duration exceeds maximum") + } + + // The rate limiter should only consider a reservation valid if + // the request is actually processed. Cancellation of the + // reservation should happen before we sleep below. + if r != nil { + r.Cancel() + } + + // Instead of returning immediately, pace the caller by + // sleeping for the mean processing duration. This helps + // against callers who disrespect 429 error codes and retry + // immediately. + if meanProcessingDuration > 0.0 { + time.Sleep(time.Duration(meanProcessingDuration * float64(time.Second))) + } + + req.outcome = outcomeLimitMaxWait + err = fmt.Errorf("request would have to wait %v to be served (maximum wait duration: %v)", + limitWaitDuration, l.params.MaxWaitDuration-req.waitDuration) + return + } + + if limitWaitDuration != 0 { + select { + case <-time.After(limitWaitDuration): + case <-ctx.Done(): + if l.params.Log { + scopedLog.Warning("Not processing API request due to cancelled context while waiting") + } + // The rate limiter should only consider a reservation + // valid if the request is actually processed. + if r != nil { + r.Cancel() + } + + req.outcome = outcomeReqCancelled + err = fmt.Errorf("request cancelled while waiting for rate limiting slot: %w", ctx.Err()) + return + } + } + + req.waitDuration = time.Since(req.scheduleTime) + +skipRateLimiter: + + l.mutex.Lock() + l.currentRequestsInFlight++ + l.mutex.Unlock() + + scopedLog = scopedLog.WithField(logWaitDurationTotal, req.waitDuration) + + if l.params.Log { + scopedLog.Info("API request released by rate limiter") + } else { + scopedLog.Debug("API request released by rate limiter") + } + + req.startTime = time.Now() + return req, nil + +} + +func parseRate(r string) (rate.Limit, error) { + tokens := strings.SplitN(r, "/", 2) + if len(tokens) != 2 { + return 0, fmt.Errorf("not in the form number/interval") + } + + f, err := strconv.ParseFloat(tokens[0], 64) + if err != nil { + return 0, fmt.Errorf("unable to parse float %q: %w", tokens[0], err) + } + + // Reject rates such as 1/1 or 10/10 as it will default to nanoseconds + // which is likely unexpected to the user. Require an explicit suffix. 
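+ // For example: "25/s" allows 25 requests per second ("s" is normalized
+ // to "1s" below), "100/2m" allows 100 requests per 120s (~0.83/s), and
+ // "10/10" is rejected because the interval lacks a duration suffix.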
+ if _, err := strconv.ParseInt(string(tokens[1]), 10, 64); err == nil { + return 0, fmt.Errorf("interval %q must contain duration suffix", tokens[1]) + } + + // If duration is provided as "m" or "s", convert it into "1m" or "1s" + if _, err := strconv.ParseInt(string(tokens[1][0]), 10, 64); err != nil { + tokens[1] = "1" + tokens[1] + } + + d, err := time.ParseDuration(tokens[1]) + if err != nil { + return 0, fmt.Errorf("unable to parse duration %q: %w", tokens[1], err) + } + + return rate.Limit(f / d.Seconds()), nil +} + +// APILimiterSet is a set of APILimiter indexed by name +type APILimiterSet struct { + limiters map[string]*APILimiter + metrics MetricsObserver +} + +// MetricsValues is the snapshot of relevant values to feed into the +// MetricsObserver +type MetricsValues struct { + WaitDuration time.Duration + MinWaitDuration time.Duration + MaxWaitDuration time.Duration + Outcome string + MeanProcessingDuration float64 + MeanWaitDuration float64 + EstimatedProcessingDuration float64 + ParallelRequests int + Limit rate.Limit + Burst int + CurrentRequestsInFlight int + AdjustmentFactor float64 + Error error +} + +// MetricsObserver is the interface that must be implemented to extract metrics +type MetricsObserver interface { + // ProcessedRequest is invoked after invocation of an API call + ProcessedRequest(name string, values MetricsValues) +} + +// NewAPILimiterSet creates a new APILimiterSet based on a set of rate limiting +// configurations and the default configuration. Any rate limiter that is +// configured in the config OR the defaults will be configured and made +// available via the Limiter(name) and Wait() function. +func NewAPILimiterSet(config map[string]string, defaults map[string]APILimiterParameters, metrics MetricsObserver) (*APILimiterSet, error) { + limiters := map[string]*APILimiter{} + + for name, p := range defaults { + // Merge user config into defaults when provided + if userConfig, ok := config[name]; ok { + combinedParams, err := p.MergeUserConfig(userConfig) + if err != nil { + return nil, err + } + p = combinedParams + } + + limiters[name] = NewAPILimiter(name, p, metrics) + } + + for name, c := range config { + if _, ok := defaults[name]; !ok { + l, err := NewAPILimiterFromConfig(name, c, metrics) + if err != nil { + return nil, fmt.Errorf("unable to parse rate limiting configuration %s=%s: %w", name, c, err) + } + + limiters[name] = l + } + } + + return &APILimiterSet{ + limiters: limiters, + metrics: metrics, + }, nil +} + +// Limiter returns the APILimiter with a given name +func (s *APILimiterSet) Limiter(name string) *APILimiter { + return s.limiters[name] +} + +type dummyRequest struct{} + +func (d dummyRequest) WaitDuration() time.Duration { return 0 } +func (d dummyRequest) Done() {} +func (d dummyRequest) Error(err error) {} + +// Wait invokes Wait() on the APILimiter with the given name. If the limiter +// does not exist, a dummy limiter is used which will not impose any +// restrictions. +func (s *APILimiterSet) Wait(ctx context.Context, name string) (LimitedRequest, error) { + l, ok := s.limiters[name] + if !ok { + return dummyRequest{}, nil + } + + return l.Wait(ctx) +} + +// parsePositiveInt parses value as an int. It returns an error if value cannot +// be parsed or is negative. 
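+//
+// Values like this typically originate from user-facing rate-limit option
+// strings, for example (an illustrative configuration, not one shipped in
+// this change):
+//
+//	rate-limit:25/s,rate-burst:4,parallel-requests:2,auto-adjust:true
+//
+// which mergeUserConfig splits on "," and ":" before dispatching each
+// key/value pair to mergeUserConfigKeyValue above.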
+func parsePositiveInt(value string) (int, error) {
+ switch i64, err := strconv.ParseInt(value, 10, 64); {
+ case err != nil:
+ return 0, fmt.Errorf("unable to parse positive integer %q: %v", value, err)
+ case i64 < 0:
+ return 0, fmt.Errorf("unable to parse positive integer %q: negative value", value)
+ case i64 > math.MaxInt:
+ return 0, fmt.Errorf("unable to parse positive integer %q: overflow", value)
+ default:
+ return int(i64), nil
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/rate/doc.go b/vendor/github.com/cilium/cilium/pkg/rate/doc.go
new file mode 100644
index 0000000000..b031503aad
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/rate/doc.go
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package rate provides a rate limiter for requests that may be bursty but
+// should only be allowed N times per defined period.
+// This package differs from the "golang.org/x/time/rate" package as it does not
+// implement the token bucket algorithm.
+package rate
diff --git a/vendor/github.com/cilium/cilium/pkg/rate/limiter.go b/vendor/github.com/cilium/cilium/pkg/rate/limiter.go
new file mode 100644
index 0000000000..abb7be05ee
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/rate/limiter.go
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package rate
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/sync/semaphore"
+)
+
+// Limiter is used to limit the number of operations done.
+type Limiter struct {
+ semaphore *semaphore.Weighted
+ burst int64
+ currWeights atomic.Int64
+ ticker *time.Ticker
+ cancelFunc context.CancelFunc
+ ctx context.Context
+}
+
+// NewLimiter returns a new Limiter that allows events up to b tokens during
+// the given interval.
+// This Limiter has a different implementation from the 'x/time/rate' Limiter
+// implementation. 'x/time/rate.Limiter' sends a constant stream of updates
+// (at a rate of a few dozen events per second) over a period of N minutes,
+// which is the behavior of the token bucket algorithm. It is designed to
+// flatten bursts in a signal to a fixed output rate.
+// This rate.Limiter does the opposite of 'x/time/rate.Limiter'. It takes a
+// somewhat fixed-rate stream of updates and turns it into a stream of
+// controlled small bursts every N minutes.
+func NewLimiter(interval time.Duration, b int64) *Limiter {
+ ticker := time.NewTicker(interval)
+ ctx, cancel := context.WithCancel(context.Background())
+ l := &Limiter{
+ semaphore: semaphore.NewWeighted(b),
+ burst: b,
+ ticker: ticker,
+ ctx: ctx,
+ cancelFunc: cancel,
+ }
+ go func() {
+ for {
+ select {
+ case <-ticker.C:
+ case <-l.ctx.Done():
+ return
+ }
+ currWeights := l.currWeights.Swap(0)
+ l.semaphore.Release(currWeights)
+ }
+ }()
+ return l
+}
+
+// Stop stops the internal components used for the rate limiter logic.
+func (lim *Limiter) Stop() {
+ lim.cancelFunc()
+ lim.ticker.Stop()
+}
+
+func (lim *Limiter) assertAlive() {
+ select {
+ case <-lim.ctx.Done():
+ panic("limiter misuse: Allow / Wait / WaitN called concurrently after Stop")
+ default:
+ }
+}
+
+// Allow is shorthand for AllowN(1).
+func (lim *Limiter) Allow() bool {
+ return lim.AllowN(1)
+}
+
+// AllowN returns true if it's possible to allow n tokens.
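+//
+// Illustrative sketch (hypothetical values, not part of this change): with
+//
+//	l := NewLimiter(time.Minute, 10)
+//	defer l.Stop()
+//
+// at most 10 calls to l.Allow() can succeed per interval; the weights taken
+// by successful calls are released back in one batch on the next tick.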
+func (lim *Limiter) AllowN(n int64) bool { + lim.assertAlive() + acq := lim.semaphore.TryAcquire(n) + if acq { + lim.currWeights.Add(n) + return true + } + return false +} + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) error { + return lim.WaitN(ctx, 1) +} + +// WaitN acquires n tokens, blocking until resources are available or ctx is +// done. On success, returns nil. On failure, returns ctx.Err() and leaves the +// limiter unchanged. +// +// If ctx is already done, WaitN may still succeed without blocking. +func (lim *Limiter) WaitN(ctx context.Context, n int64) error { + lim.assertAlive() + if n > lim.burst { + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) + } + err := lim.semaphore.Acquire(ctx, n) + if err != nil { + return err + } + lim.currWeights.Add(n) + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/rate/metrics/metrics.go b/vendor/github.com/cilium/cilium/pkg/rate/metrics/metrics.go new file mode 100644 index 0000000000..74764ed643 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/rate/metrics/metrics.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package metrics + +import ( + "github.com/cilium/cilium/pkg/metrics" + "github.com/cilium/cilium/pkg/rate" +) + +func APILimiterObserver() rate.MetricsObserver { + return &apiRateLimitingMetrics{} +} + +type apiRateLimitingMetrics struct{} + +func (a *apiRateLimitingMetrics) ProcessedRequest(name string, v rate.MetricsValues) { + metrics.APILimiterProcessingDuration.WithLabelValues(name, "mean").Set(v.MeanProcessingDuration) + metrics.APILimiterProcessingDuration.WithLabelValues(name, "estimated").Set(v.EstimatedProcessingDuration) + metrics.APILimiterWaitDuration.WithLabelValues(name, "mean").Set(v.MeanWaitDuration) + metrics.APILimiterWaitDuration.WithLabelValues(name, "max").Set(v.MaxWaitDuration.Seconds()) + metrics.APILimiterWaitDuration.WithLabelValues(name, "min").Set(v.MinWaitDuration.Seconds()) + metrics.APILimiterRequestsInFlight.WithLabelValues(name, "in-flight").Set(float64(v.CurrentRequestsInFlight)) + metrics.APILimiterRequestsInFlight.WithLabelValues(name, "limit").Set(float64(v.ParallelRequests)) + metrics.APILimiterRateLimit.WithLabelValues(name, "limit").Set(float64(v.Limit)) + metrics.APILimiterRateLimit.WithLabelValues(name, "burst").Set(float64(v.Burst)) + metrics.APILimiterAdjustmentFactor.WithLabelValues(name).Set(v.AdjustmentFactor) + + if v.Outcome == "" { + metrics.APILimiterWaitHistoryDuration.WithLabelValues(name).Observe(v.WaitDuration.Seconds()) + v.Outcome = metrics.Error2Outcome(v.Error) + } + + metrics.APILimiterProcessedRequests.WithLabelValues(name, v.Outcome).Inc() +} diff --git a/vendor/github.com/cilium/cilium/pkg/safeio/safeio.go b/vendor/github.com/cilium/cilium/pkg/safeio/safeio.go new file mode 100644 index 0000000000..4eddccc414 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/safeio/safeio.go @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package safeio + +import ( + "fmt" + "io" +) + +// ErrLimitReached indicates that ReadAllLimit has +// reached its limit before completing a full read +// of the io.Reader. 
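+//
+// Illustrative use of ReadAllLimit, defined below (resp is a hypothetical
+// *http.Response, not part of this change):
+//
+//	body, err := ReadAllLimit(resp.Body, 1*MB)
+//	if errors.Is(err, ErrLimitReached) {
+//		// body contains the first 1MB; the input was larger than allowed
+//	}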
+var ErrLimitReached = fmt.Errorf("read limit reached") + +// ByteSize expresses the size of bytes +type ByteSize float64 + +const ( + _ = iota // ignore first value by assigning to blank identifier + // KB is a Kilobyte + KB ByteSize = 1 << (10 * iota) + // MB is a Megabyte + MB + // GB is a Gigabyte + GB + // TB is a Terabyte + TB + // PB is a Petabyte + PB + // EB is an Exabyte + EB + // ZB is a Zettabyte + ZB + // YB is a Yottabyte + YB +) + +// String converts a ByteSize to a string +func (b ByteSize) String() string { + switch { + case b >= YB: + return fmt.Sprintf("%.1fYB", b/YB) + case b >= ZB: + return fmt.Sprintf("%.1fZB", b/ZB) + case b >= EB: + return fmt.Sprintf("%.1fEB", b/EB) + case b >= PB: + return fmt.Sprintf("%.1fPB", b/PB) + case b >= TB: + return fmt.Sprintf("%.1fTB", b/TB) + case b >= GB: + return fmt.Sprintf("%.1fGB", b/GB) + case b >= MB: + return fmt.Sprintf("%.1fMB", b/MB) + case b >= KB: + return fmt.Sprintf("%.1fKB", b/KB) + } + return fmt.Sprintf("%.1fB", b) +} + +// ReadAllLimit reads from r until an error, EOF, or after n bytes and returns +// the data it read. A successful call returns err == nil, not err == EOF. +// Because ReadAllLimit is defined to read from src until EOF it does not +// treat an EOF from Read as an error to be reported. If the limit is reached +// ReadAllLimit will return ErrLimitReached as an error. +func ReadAllLimit(r io.Reader, n ByteSize) ([]byte, error) { + limit := int(n + 1) + buf, err := io.ReadAll(io.LimitReader(r, int64(limit))) + if err != nil { + return buf, err + } + if len(buf) >= limit { + return buf[:limit-1], ErrLimitReached + } + return buf, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/service/store/logfields.go b/vendor/github.com/cilium/cilium/pkg/service/store/logfields.go new file mode 100644 index 0000000000..7dc7953d68 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/service/store/logfields.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package store + +import ( + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" +) + +var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "service") diff --git a/vendor/github.com/cilium/cilium/pkg/service/store/store.go b/vendor/github.com/cilium/cilium/pkg/service/store/store.go new file mode 100644 index 0000000000..db007fac40 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/service/store/store.go @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package store + +import ( + "encoding/json" + "net/netip" + "path" + + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" + "github.com/cilium/cilium/pkg/kvstore" + "github.com/cilium/cilium/pkg/kvstore/store" + "github.com/cilium/cilium/pkg/loadbalancer" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/option" +) + +var ( + // ServiceStorePrefix is the kvstore prefix of the shared store + // + // WARNING - STABLE API: Changing the structure or values of this will + // break backwards compatibility + ServiceStorePrefix = path.Join(kvstore.BaseKeyPrefix, "state", "services", "v1") +) + +// ServiceMerger is the interface to be implemented by the owner of local +// services. The functions have to merge service updates and deletions with +// local services to provide a shared view. 
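+//
+// A minimal implementing sketch (hypothetical type, for illustration only):
+//
+//	type debugMerger struct{}
+//
+//	func (debugMerger) MergeClusterServiceUpdate(svc *ClusterService, swg *lock.StoppableWaitGroup) {
+//		log.Debugf("merge update: %s", svc.String())
+//	}
+//
+//	func (debugMerger) MergeClusterServiceDelete(svc *ClusterService, swg *lock.StoppableWaitGroup) {
+//		log.Debugf("merge delete: %s", svc.String())
+//	}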
+type ServiceMerger interface {
+ MergeClusterServiceUpdate(service *ClusterService, swg *lock.StoppableWaitGroup)
+ MergeClusterServiceDelete(service *ClusterService, swg *lock.StoppableWaitGroup)
+}
+
+// PortConfiguration is the L4 port configuration of a frontend or backend. The
+// map is indexed by the name of the port and the value contains the L4 port
+// and protocol.
+//
+// +deepequal-gen=true
+type PortConfiguration map[string]*loadbalancer.L4Addr
+
+// ClusterService is the definition of a service in a cluster
+//
+// WARNING - STABLE API: Any change to this structure must be done in a
+// backwards compatible way.
+//
+// +k8s:deepcopy-gen=true
+type ClusterService struct {
+ // Cluster is the cluster name the service is configured in
+ Cluster string `json:"cluster"`
+
+ // Namespace is the cluster namespace the service is configured in
+ Namespace string `json:"namespace"`
+
+ // Name is the name of the service. It must be unique within the
+ // namespace of the cluster
+ Name string `json:"name"`
+
+ // Frontends is a map indexed by the frontend IP address
+ Frontends map[string]PortConfiguration `json:"frontends"`
+
+ // Backends is a map indexed by the backend IP address
+ Backends map[string]PortConfiguration `json:"backends"`
+
+ // Labels are the labels of the service
+ Labels map[string]string `json:"labels"`
+
+ // Selector is the label selector used to select backends
+ Selector map[string]string `json:"selector"`
+
+ // IncludeExternal is true when external endpoints from other clusters
+ // should be included
+ IncludeExternal bool `json:"includeExternal"`
+
+ // Shared is true when the service should be exposed/shared to other clusters
+ Shared bool `json:"shared"`
+
+ // ClusterID is the cluster ID the service is configured in
+ ClusterID uint32 `json:"clusterID"`
+}
+
+func (s *ClusterService) String() string {
+ return s.Cluster + "/" + s.Namespace + "/" + s.Name
+}
+
+// NamespaceServiceName returns the namespace and service name
+func (s *ClusterService) NamespaceServiceName() string {
+ return s.Namespace + "/" + s.Name
+}
+
+// GetKeyName returns the kvstore key to be used for the global service
+func (s *ClusterService) GetKeyName() string {
+ // WARNING - STABLE API: Changing the structure of the key may break
+ // backwards compatibility
+ return path.Join(s.Cluster, s.Namespace, s.Name)
+}
+
+// DeepKeyCopy creates a deep copy of the LocalKey
+func (s *ClusterService) DeepKeyCopy() store.LocalKey {
+ return s.DeepCopy()
+}
+
+// Marshal returns the global service object as JSON byte slice
+func (s *ClusterService) Marshal() ([]byte, error) {
+ return json.Marshal(s)
+}
+
+// Unmarshal parses the JSON byte slice and updates the global service receiver
+func (s *ClusterService) Unmarshal(_ string, data []byte) error {
+ newService := NewClusterService("", "")
+
+ if err := json.Unmarshal(data, &newService); err != nil {
+ return err
+ }
+
+ if err := newService.validate(); err != nil {
+ return err
+ }
+
+ *s = newService
+
+ return nil
+}
+
+func (s *ClusterService) validate() error {
+ // Skip the ClusterID check if it matches the local one, as we assume that
+ // it has already been validated, and to allow it to be zero.
+ if s.ClusterID != option.Config.ClusterID { + if err := cmtypes.ValidateClusterID(s.ClusterID); err != nil { + return err + } + } + + for address := range s.Frontends { + if _, err := netip.ParseAddr(address); err != nil { + return err + } + } + + for address := range s.Backends { + if _, err := netip.ParseAddr(address); err != nil { + return err + } + } + + return nil +} + +// NewClusterService returns a new cluster service definition +func NewClusterService(name, namespace string) ClusterService { + return ClusterService{ + Name: name, + Namespace: namespace, + Frontends: map[string]PortConfiguration{}, + Backends: map[string]PortConfiguration{}, + Labels: map[string]string{}, + Selector: map[string]string{}, + } +} + +type clusterServiceObserver struct { + // merger is the interface responsible to merge service and + // endpoints into an existing cache + merger ServiceMerger + + // swg provides a mechanism to know when the services were synchronized + // with the datapath. + swg *lock.StoppableWaitGroup +} + +// OnUpdate is called when a service in a remote cluster is updated +func (c *clusterServiceObserver) OnUpdate(key store.Key) { + if svc, ok := key.(*ClusterService); ok { + scopedLog := log.WithField(logfields.ServiceName, svc.String()) + scopedLog.Debugf("Update event of cluster service %#v", svc) + + c.merger.MergeClusterServiceUpdate(svc, c.swg) + } else { + log.Warningf("Received unexpected cluster service update object %+v", key) + } +} + +// OnDelete is called when a service in a remote cluster is deleted +func (c *clusterServiceObserver) OnDelete(key store.NamedKey) { + if svc, ok := key.(*ClusterService); ok { + scopedLog := log.WithField(logfields.ServiceName, svc.String()) + scopedLog.Debugf("Delete event of cluster service %#v", svc) + + c.merger.MergeClusterServiceDelete(svc, c.swg) + } else { + log.Warningf("Received unexpected cluster service delete object %+v", key) + } +} + +// JoinClusterServices starts a controller for syncing services from the kvstore +func JoinClusterServices(merger ServiceMerger, clusterName string) { + swg := lock.NewStoppableWaitGroup() + + log.Info("Enumerating cluster services") + // JoinSharedStore performs initial sync of services + _, err := store.JoinSharedStore(store.Configuration{ + Prefix: path.Join(ServiceStorePrefix, clusterName), + KeyCreator: func() store.Key { + return &ClusterService{} + }, + Observer: &clusterServiceObserver{ + merger: merger, + swg: swg, + }, + }) + if err != nil { + log.WithError(err).Fatal("Enumerating cluster services failed") + } + swg.Stop() +} diff --git a/vendor/github.com/cilium/cilium/pkg/service/store/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/service/store/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f08675bae1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/service/store/zz_generated.deepcopy.go @@ -0,0 +1,93 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package store + +import ( + loadbalancer "github.com/cilium/cilium/pkg/loadbalancer" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterService) DeepCopyInto(out *ClusterService) { + *out = *in + if in.Frontends != nil { + in, out := &in.Frontends, &out.Frontends + *out = make(map[string]PortConfiguration, len(*in)) + for key, val := range *in { + var outVal map[string]*loadbalancer.L4Addr + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(PortConfiguration, len(*in)) + for key, val := range *in { + var outVal *loadbalancer.L4Addr + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(loadbalancer.L4Addr) + **out = **in + } + (*out)[key] = outVal + } + } + (*out)[key] = outVal + } + } + if in.Backends != nil { + in, out := &in.Backends, &out.Backends + *out = make(map[string]PortConfiguration, len(*in)) + for key, val := range *in { + var outVal map[string]*loadbalancer.L4Addr + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(PortConfiguration, len(*in)) + for key, val := range *in { + var outVal *loadbalancer.L4Addr + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(loadbalancer.L4Addr) + **out = **in + } + (*out)[key] = outVal + } + } + (*out)[key] = outVal + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterService. +func (in *ClusterService) DeepCopy() *ClusterService { + if in == nil { + return nil + } + out := new(ClusterService) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/cilium/cilium/pkg/service/store/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/service/store/zz_generated.deepequal.go new file mode 100644 index 0000000000..d0b8a3bd22 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/service/store/zz_generated.deepequal.go @@ -0,0 +1,33 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepequal-gen. DO NOT EDIT. + +package store + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *PortConfiguration) DeepEqual(other *PortConfiguration) bool { + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if !inValue.DeepEqual(otherValue) { + return false + } + } + } + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/sysctl/doc.go b/vendor/github.com/cilium/cilium/pkg/sysctl/doc.go new file mode 100644 index 0000000000..07b01e24a6 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/sysctl/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package sysctl allows to change kernel parameters at runtime. 
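+//
+// Illustrative usage (the parameter names below are examples only, not part
+// of this change):
+//
+//	if err := sysctl.Enable("net.ipv4.ip_forward"); err != nil {
+//		// handle the write failure
+//	}
+//	val, err := sysctl.Read("net.core.somaxconn")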
+package sysctl
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+const (
+ subsystem = "sysctl"
+
+ procFsDefault = "/proc"
+)
+
+var (
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, subsystem)
+
+ // parameterElemRx matches an element of a sysctl parameter.
+ parameterElemRx = regexp.MustCompile(`\A[-0-9_a-z]+\z`)
+
+ procFsMU lock.Mutex
+ // procFsRead is set to true once procFs has been read via GetProcfs.
+ procFsRead bool
+ procFs = procFsDefault
+)
+
+// An ErrInvalidSysctlParameter is returned when a parameter is invalid.
+type ErrInvalidSysctlParameter string
+
+func (e ErrInvalidSysctlParameter) Error() string {
+ return fmt.Sprintf("invalid sysctl parameter: %q", string(e))
+}
+
+// Setting represents a sysctl setting. Its purpose is to make it possible to
+// iterate over a slice of settings.
+type Setting struct {
+ Name string
+ Val string
+ IgnoreErr bool
+
+ // Warn, if non-empty, is the alternative warning log message to use when IgnoreErr is false.
+ Warn string
+}
+
+// parameterPath returns the path to the sysctl file for parameter name.
+func parameterPath(name string) (string, error) {
+ elems := strings.Split(name, ".")
+ for _, elem := range elems {
+ if !parameterElemRx.MatchString(elem) {
+ return "", ErrInvalidSysctlParameter(name)
+ }
+ }
+ return filepath.Join(append([]string{GetProcfs(), "sys"}, elems...)...), nil
+}
+
+// SetProcfs sets the path for the root's /proc. Calling it after GetProcfs
+// causes a panic.
+func SetProcfs(path string) {
+ procFsMU.Lock()
+ defer procFsMU.Unlock()
+ if procFsRead {
+ // do not change the procfs after we have gotten its value from GetProcfs
+ panic("SetProcfs called after GetProcfs")
+ }
+ procFs = path
+}
+
+// GetProcfs returns the path set in procFs. Calling SetProcfs after GetProcfs
+// panics. See SetProcfs for more info.
+func GetProcfs() string {
+ procFsMU.Lock()
+ defer procFsMU.Unlock()
+ procFsRead = true
+ return procFs
+}
+
+func writeSysctl(name string, value string) error {
+ path, err := parameterPath(name)
+ if err != nil {
+ return err
+ }
+ f, err := os.OpenFile(path, os.O_RDWR, 0644)
+ if err != nil {
+ return fmt.Errorf("could not open the sysctl file %s: %s",
+ path, err)
+ }
+ defer f.Close()
+ if _, err := io.WriteString(f, value); err != nil {
+ return fmt.Errorf("could not write to the sysctl file %s: %s",
+ path, err)
+ }
+ return nil
+}
+
+// Disable disables the given sysctl parameter.
+func Disable(name string) error {
+ return writeSysctl(name, "0")
+}
+
+// Enable enables the given sysctl parameter.
+func Enable(name string) error {
+ return writeSysctl(name, "1")
+}
+
+// Write writes the given sysctl parameter.
+func Write(name string, val string) error {
+ return writeSysctl(name, val)
+}
+
+// WriteInt writes the given integer type sysctl parameter.
+func WriteInt(name string, val int64) error { + return writeSysctl(name, strconv.FormatInt(val, 10)) +} + +// Read reads the given sysctl parameter. +func Read(name string) (string, error) { + path, err := parameterPath(name) + if err != nil { + return "", err + } + val, err := os.ReadFile(path) + if err != nil { + return "", fmt.Errorf("Failed to read %s: %s", path, err) + } + + return strings.TrimRight(string(val), "\n"), nil +} + +// ReadInt reads the given sysctl parameter, return an int64 value. +func ReadInt(name string) (int64, error) { + s, err := Read(name) + if err != nil { + return -1, err + } + + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return -1, err + } + + return i, nil +} + +// ApplySettings applies all settings in sysSettings. +func ApplySettings(sysSettings []Setting) error { + for _, s := range sysSettings { + log.WithFields(logrus.Fields{ + logfields.SysParamName: s.Name, + logfields.SysParamValue: s.Val, + }).Info("Setting sysctl") + if err := Write(s.Name, s.Val); err != nil { + if !s.IgnoreErr || errors.Is(err, ErrInvalidSysctlParameter("")) { + return fmt.Errorf("Failed to sysctl -w %s=%s: %s", s.Name, s.Val, err) + } + + warn := "Failed to sysctl -w" + if s.Warn != "" { + warn = s.Warn + } + log.WithError(err).WithFields(logrus.Fields{ + logfields.SysParamName: s.Name, + logfields.SysParamValue: s.Val, + }).Warning(warn) + } + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/trigger/doc.go b/vendor/github.com/cilium/cilium/pkg/trigger/doc.go new file mode 100644 index 0000000000..7ca449cd3a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/trigger/doc.go @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package trigger provides a mechanism to trigger actions that require to be +// serialized while providing a non-blocking notification mechanism +package trigger diff --git a/vendor/github.com/cilium/cilium/pkg/trigger/trigger.go b/vendor/github.com/cilium/cilium/pkg/trigger/trigger.go new file mode 100644 index 0000000000..7afae25872 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/trigger/trigger.go @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package trigger + +import ( + "fmt" + + "github.com/cilium/cilium/pkg/inctimer" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/time" +) + +// MetricsObserver is the interface a metrics collector has to implement in +// order to collect trigger metrics +type MetricsObserver interface { + // PostRun is called after a trigger run with the call duration, the + // latency between 1st queue request and the call run and the number of + // queued events folded into the last run + PostRun(callDuration, latency time.Duration, folds int) + + // QueueEvent is called when Trigger() is called to schedule a trigger + // run + QueueEvent(reason string) +} + +// Parameters are the user specified parameters +type Parameters struct { + // MinInterval is the minimum required interval between invocations of + // TriggerFunc + MinInterval time.Duration + + // TriggerFunc is the function to be called when Trigger() is called + // while respecting MinInterval and serialization + TriggerFunc func(reasons []string) + + // ShutdownFunc is called when the trigger is shut down + ShutdownFunc func() + + MetricsObserver MetricsObserver + + // Name is the unique name of the trigger. 
It must be provided in a
+ // format compatible with Prometheus metric name strings.
+ Name string
+
+ // sleepInterval controls the waiter sleep duration. This parameter is
+ // only exposed to tests
+ sleepInterval time.Duration
+}
+
+type reasonStack map[string]struct{}
+
+func newReasonStack() reasonStack {
+ return map[string]struct{}{}
+}
+
+func (r reasonStack) add(reason string) {
+ r[reason] = struct{}{}
+}
+
+func (r reasonStack) slice() []string {
+ result := make([]string, len(r))
+ i := 0
+ for reason := range r {
+ result[i] = reason
+ i++
+ }
+ return result
+}
+
+// Trigger represents active trigger logic. Use NewTrigger() to create a
+// trigger
+type Trigger struct {
+ // mutex protects access to 'trigger' between Trigger() and waiter()
+ mutex lock.Mutex
+ trigger bool
+
+ // params are the user specified parameters
+ params Parameters
+
+ // lastTrigger is the timestamp of the last invoked trigger
+ lastTrigger time.Time
+
+ // wakeupChan is used to wake up the background trigger routine
+ wakeupChan chan struct{}
+
+ // closeChan is used to stop the background trigger routine
+ closeChan chan struct{}
+
+ // numFolds is the current count of folds that happened into the
+ // currently scheduled trigger
+ numFolds int
+
+ // foldedReasons is the sum of all unique reasons folded together.
+ foldedReasons reasonStack
+
+ waitStart time.Time
+}
+
+// NewTrigger returns a new trigger based on the provided parameters
+func NewTrigger(p Parameters) (*Trigger, error) {
+ if p.sleepInterval == 0 {
+ p.sleepInterval = time.Second
+ }
+
+ if p.TriggerFunc == nil {
+ return nil, fmt.Errorf("trigger function is nil")
+ }
+
+ t := &Trigger{
+ params: p,
+ wakeupChan: make(chan struct{}, 1),
+ closeChan: make(chan struct{}, 1),
+ foldedReasons: newReasonStack(),
+ }
+
+ // Guarantee that initial trigger has no delay
+ if p.MinInterval > time.Duration(0) {
+ t.lastTrigger = time.Now().Add(-1 * p.MinInterval)
+ }
+
+ go t.waiter()
+
+ return t, nil
+}
+
+// needsDelay returns whether and how long of a delay is required to fulfill
+// MinInterval
+func (t *Trigger) needsDelay() (bool, time.Duration) {
+ if t.params.MinInterval == time.Duration(0) {
+ return false, 0
+ }
+
+ sleepTime := time.Since(t.lastTrigger.Add(t.params.MinInterval))
+ return sleepTime < 0, sleepTime * -1
+}
+
+// TriggerWithReason triggers the call to TriggerFunc as specified in the
+// parameters provided to NewTrigger(), recording the given reason. It respects
+// MinInterval and ensures that calls to TriggerFunc are serialized. This
+// function is non-blocking and will return immediately before TriggerFunc is
+// potentially triggered and has completed.
+func (t *Trigger) TriggerWithReason(reason string) {
+ t.mutex.Lock()
+ t.trigger = true
+ if t.numFolds == 0 {
+ t.waitStart = time.Now()
+ }
+ t.numFolds++
+ t.foldedReasons.add(reason)
+ t.mutex.Unlock()
+
+ if t.params.MetricsObserver != nil {
+ t.params.MetricsObserver.QueueEvent(reason)
+ }
+
+ select {
+ case t.wakeupChan <- struct{}{}:
+ default:
+ }
+}
+
+// Trigger triggers the call to TriggerFunc as specified in the parameters
+// provided to NewTrigger(). It respects MinInterval and ensures that calls to
+// TriggerFunc are serialized. This function is non-blocking and will return
+// immediately before TriggerFunc is potentially triggered and has completed.
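+//
+// Illustrative usage (the name and interval below are hypothetical, not part
+// of this change):
+//
+//	t, err := trigger.NewTrigger(trigger.Parameters{
+//		Name:        "example-sync",
+//		MinInterval: time.Second,
+//		TriggerFunc: func(reasons []string) {
+//			// serialized; invoked at most once per MinInterval,
+//			// with all folded reasons since the last run
+//		},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	t.TriggerWithReason("config-changed")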
+func (t *Trigger) Trigger() { + t.TriggerWithReason("") +} + +// Shutdown stops the trigger mechanism +func (t *Trigger) Shutdown() { + close(t.closeChan) +} + +func (t *Trigger) waiter() { + sleepTimer, sleepTimerDone := inctimer.New() + defer sleepTimerDone() + for { + // keep critical section as small as possible + t.mutex.Lock() + triggerEnabled := t.trigger + t.trigger = false + t.mutex.Unlock() + + // run the trigger function + if triggerEnabled { + if delayNeeded, delay := t.needsDelay(); delayNeeded { + time.Sleep(delay) + } + + t.mutex.Lock() + t.lastTrigger = time.Now() + numFolds := t.numFolds + t.numFolds = 0 + reasons := t.foldedReasons.slice() + t.foldedReasons = newReasonStack() + callLatency := time.Since(t.waitStart) + t.mutex.Unlock() + + beforeTrigger := time.Now() + t.params.TriggerFunc(reasons) + + if t.params.MetricsObserver != nil { + callDuration := time.Since(beforeTrigger) + t.params.MetricsObserver.PostRun(callDuration, callLatency, numFolds) + } + } + + select { + case <-t.wakeupChan: + case <-sleepTimer.After(t.params.sleepInterval): + + case <-t.closeChan: + shutdownFunc := t.params.ShutdownFunc + if shutdownFunc != nil { + shutdownFunc() + } + return + } + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/types/ipv4.go b/vendor/github.com/cilium/cilium/pkg/types/ipv4.go new file mode 100644 index 0000000000..e45ca66ef8 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/types/ipv4.go @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import ( + "net" + "net/netip" +) + +// IPv4 is the binary representation for encoding in binary structs. +type IPv4 [4]byte + +func (v4 IPv4) IsZero() bool { + return v4[0] == 0 && v4[1] == 0 && v4[2] == 0 && v4[3] == 0 +} + +func (v4 IPv4) IP() net.IP { + return v4[:] +} + +func (v4 IPv4) Addr() netip.Addr { + return netip.AddrFrom4(v4) +} + +func (v4 IPv4) String() string { + return v4.IP().String() +} + +// DeepCopyInto is a deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (v4 *IPv4) DeepCopyInto(out *IPv4) { + copy(out[:], v4[:]) +} + +// FromAddr will populate the receiver with the specified address if and only +// if the provided address is a valid IPv4 address. Any other address, +// including the "invalid ip" value netip.Addr{} will zero the receiver. +func (v4 *IPv4) FromAddr(addr netip.Addr) { + if addr.Is4() { + a := IPv4(addr.As4()) + copy(v4[:], a[:]) + } else { + clear(v4[:]) + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/types/ipv6.go b/vendor/github.com/cilium/cilium/pkg/types/ipv6.go new file mode 100644 index 0000000000..e740e19e8d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/types/ipv6.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import ( + "net" + "net/netip" +) + +// IPv6 is the binary representation for encoding in binary structs. +type IPv6 [16]byte + +func (v6 IPv6) IP() net.IP { + return v6[:] +} + +func (v6 IPv6) Addr() netip.Addr { + return netip.AddrFrom16(v6) +} + +func (v6 IPv6) String() string { + return v6.IP().String() +} + +// DeepCopyInto is a deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (v6 *IPv6) DeepCopyInto(out *IPv6) { + copy(out[:], v6[:]) +} + +// FromAddr will populate the receiver with the specified address if and only +// if the provided address is a valid IPv6 address. 
Any other address, +// including the "invalid ip" value netip.Addr{} will zero the receiver. +func (v6 *IPv6) FromAddr(addr netip.Addr) { + if addr.Is6() { + a := IPv6(addr.As16()) + copy(v6[:], a[:]) + } else { + clear(v6[:]) + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/types/macaddr.go b/vendor/github.com/cilium/cilium/pkg/types/macaddr.go new file mode 100644 index 0000000000..10470bd725 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/types/macaddr.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import ( + "net" +) + +// MACAddr is the binary representation for encoding in binary structs. +type MACAddr [6]byte + +func (addr MACAddr) hardwareAddr() net.HardwareAddr { + return addr[:] +} + +func (addr MACAddr) String() string { + return addr.hardwareAddr().String() +} + +// DeepCopyInto is a deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (addr *MACAddr) DeepCopyInto(out *MACAddr) { + copy(out[:], addr[:]) +} diff --git a/vendor/github.com/cilium/cilium/pkg/types/portmap.go b/vendor/github.com/cilium/cilium/pkg/types/portmap.go new file mode 100644 index 0000000000..86ab9f4463 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/types/portmap.go @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import ( + "errors" + "fmt" + "strings" + + "github.com/cilium/cilium/pkg/counter" + "github.com/cilium/cilium/pkg/iana" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/u8proto" +) + +var ( + ErrNilMap = errors.New("nil map") + ErrUnknownNamedPort = errors.New("unknown named port") + ErrIncompatibleProtocol = errors.New("incompatible protocol") + ErrNamedPortIsZero = errors.New("named port is zero") + ErrDuplicateNamedPorts = errors.New("duplicate named ports") +) + +// PortProto is a pair of port number and protocol and is used as the +// value type in named port maps. +type PortProto struct { + Port uint16 // non-0 + Proto uint8 // 0 for any +} + +// NamedPortMap maps port names to port numbers and protocols. +type NamedPortMap map[string]PortProto + +// PortProtoSet is a reference-counted set of unique PortProto values. +type PortProtoSet counter.Counter[PortProto] + +// Equal returns true if the PortProtoSets are equal. +func (pps PortProtoSet) Equal(other PortProtoSet) bool { + if len(pps) != len(other) { + return false + } + + for port := range pps { + if _, exists := other[port]; !exists { + return false + } + } + return true +} + +// Add increments the reference count for the specified key. +func (pps PortProtoSet) Add(pp PortProto) bool { + return counter.Counter[PortProto](pps).Add(pp) +} + +// Delete decrements the reference count for the specified key. +func (pps PortProtoSet) Delete(pp PortProto) bool { + return counter.Counter[PortProto](pps).Delete(pp) +} + +// NamedPortMultiMap may have multiple entries for a name if multiple PODs +// define the same name with different values. +type NamedPortMultiMap interface { + // GetNamedPort returns the port number for the named port, if any. + GetNamedPort(name string, proto uint8) (uint16, error) + // Len returns the number of Name->PortProtoSet mappings known. 
+ Len() int +} + +func NewNamedPortMultiMap() *namedPortMultiMap { + return &namedPortMultiMap{ + m: make(map[string]PortProtoSet), + } +} + +// Implements NamedPortMultiMap and allows changes through Update. All accesses +// must be protected by its RW mutex. +type namedPortMultiMap struct { + lock.RWMutex + m map[string]PortProtoSet +} + +func (npm *namedPortMultiMap) Len() int { + npm.RLock() + defer npm.RUnlock() + return len(npm.m) +} + +// Update applies potential changes in named ports, and returns whether there were any. +func (npm *namedPortMultiMap) Update(old, new NamedPortMap) (namedPortsChanged bool) { + npm.Lock() + defer npm.Unlock() + // The order is important here. Increment the refcount first, and then + // decrement it again for old ports, so that we don't hit zero if there are + // no changes. + for name, port := range new { + c, ok := npm.m[name] + if !ok { + c = make(PortProtoSet) + npm.m[name] = c + } + if c.Add(port) { + namedPortsChanged = true + } + } + for name, port := range old { + if npm.m[name].Delete(port) { + namedPortsChanged = true + if len(npm.m[name]) == 0 { + delete(npm.m, name) + } + } + } + return namedPortsChanged +} + +// ValidatePortName checks that the port name conforms to the IANA Service Names spec +// and converts the port name to lower case for case-insensitive comparisons. +func ValidatePortName(name string) (string, error) { + if !iana.IsSvcName(name) { // Port names are formatted as IANA Service Names + return "", fmt.Errorf("Invalid port name \"%s\", not using as a named port", name) + } + return strings.ToLower(name), nil // Normalize for case-insensitive comparison +} + +func newPortProto(port int, protocol string) (pp PortProto, err error) { + var u8p u8proto.U8proto + if protocol == "" { + u8p = u8proto.TCP // K8s ContainerPort protocol defaults to TCP + } else { + var err error + u8p, err = u8proto.ParseProtocol(protocol) + if err != nil { + return pp, err + } + } + if port < 1 || port > 65535 { + if port == 0 { + return pp, ErrNamedPortIsZero + } + return pp, fmt.Errorf("Port number %d out of 16-bit range", port) + } + return PortProto{ + Proto: uint8(u8p), + Port: uint16(port), + }, nil +} + +// AddPort adds a new PortProto to the NamedPortMap +func (npm NamedPortMap) AddPort(name string, port int, protocol string) error { + name, err := ValidatePortName(name) + if err != nil { + return err + } + pp, err := newPortProto(port, protocol) + if err != nil { + return err + } + npm[name] = pp + return nil +} + +// GetNamedPort returns the port number for the named port, if any. +func (npm NamedPortMap) GetNamedPort(name string, proto uint8) (uint16, error) { + if npm == nil { + return 0, ErrNilMap + } + pp, ok := npm[name] + if !ok { + return 0, ErrUnknownNamedPort + } + if pp.Proto != 0 && proto != pp.Proto { + return 0, ErrIncompatibleProtocol + } + if pp.Port == 0 { + return 0, ErrNamedPortIsZero + } + return pp.Port, nil +} + +// GetNamedPort returns the port number for the named port, if any. +func (npm *namedPortMultiMap) GetNamedPort(name string, proto uint8) (uint16, error) { + if npm == nil { + return 0, ErrNilMap + } + npm.RLock() + defer npm.RUnlock() + if npm.m == nil { + return 0, ErrNilMap + } + pps, ok := npm.m[name] + if !ok { + // Return an error the caller can filter out as this happens only for egress policy + // and it is likely the destination POD with the port name is simply not scheduled yet. 
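+		// An illustrative caller sketch of that filtering (hypothetical code,
+		// assuming the standard errors package and pkg/u8proto are imported):
+		//
+		//	port, err := npm.GetNamedPort("http", uint8(u8proto.TCP))
+		//	if errors.Is(err, ErrUnknownNamedPort) {
+		//		// likely transient: the destination pod is not scheduled yet
+		//	}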
+		return 0, ErrUnknownNamedPort
+	}
+	// Find if there is a single port that has no proto conflict and no zero port value
+	port := uint16(0)
+	err := ErrUnknownNamedPort
+	for pp := range pps {
+		if pp.Proto != 0 && proto != pp.Proto {
+			err = ErrIncompatibleProtocol
+			continue // conflicting proto
+		}
+		if pp.Port == 0 {
+			err = ErrNamedPortIsZero
+			continue // zero port
+		}
+		if port != 0 && pp.Port != port {
+			return 0, ErrDuplicateNamedPorts
+		}
+		port = pp.Port
+	}
+	if port == 0 {
+		return 0, err
+	}
+	return port, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/u8proto/u8proto.go b/vendor/github.com/cilium/cilium/pkg/u8proto/u8proto.go
new file mode 100644
index 0000000000..2df035a044
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/u8proto/u8proto.go
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package u8proto
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// These definitions must contain and be compatible with the string
+// values defined for pkg/policy/api/L4Proto
+
+const (
+	// ANY represents all protocols.
+	ANY    U8proto = 0
+	ICMP   U8proto = 1
+	TCP    U8proto = 6
+	UDP    U8proto = 17
+	ICMPv6 U8proto = 58
+	SCTP   U8proto = 132
+)
+
+var protoNames = map[U8proto]string{
+	0:   "ANY",
+	1:   "ICMP",
+	6:   "TCP",
+	17:  "UDP",
+	58:  "ICMPv6",
+	132: "SCTP",
+}
+
+var ProtoIDs = map[string]U8proto{
+	"all":    0,
+	"any":    0,
+	"icmp":   1,
+	"tcp":    6,
+	"udp":    17,
+	"icmpv6": 58,
+	"sctp":   132,
+}
+
+type U8proto uint8
+
+func (p U8proto) String() string {
+	if _, ok := protoNames[p]; ok {
+		return protoNames[p]
+	}
+	return strconv.Itoa(int(p))
+}
+
+func ParseProtocol(proto string) (U8proto, error) {
+	if u, ok := ProtoIDs[strings.ToLower(proto)]; ok {
+		return u, nil
+	}
+	return 0, fmt.Errorf("unknown protocol '%s'", proto)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/wireguard/types/types.go b/vendor/github.com/cilium/cilium/pkg/wireguard/types/types.go
new file mode 100644
index 0000000000..a6fb646156
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/wireguard/types/types.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Common WireGuard types and constants
+package types
+
+const (
+	// IfaceName is the name of the WireGuard tunnel device
+	IfaceName = "cilium_wg0"
+	// PrivKeyFilename is the name of the WireGuard private key file
+	PrivKeyFilename = "cilium_wg0.key"
+	// StaticEncryptKey is used in the IPCache to mark entries for which we
+	// want to enable WireGuard encryption
+	StaticEncryptKey = uint8(0xFF)
+)
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.go b/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.go
new file mode 100644
index 0000000000..89d60eec20
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.go
@@ -0,0 +1,19 @@
+// Copyright 2020 Authors of Cilium
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
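+
+// The exported alias defined just below lets code outside this package name
+// the L7 oneof interface; a hypothetical consumer sketch:
+//
+//	func describe(l7 cilium.IsLogEntry_L7) string {
+//		if _, ok := l7.(*cilium.LogEntry_Http); ok {
+//			return "http"
+//		}
+//		return "other"
+//	}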
+ +package cilium + +// Add an exported type alias for L7 log entry oneof, so that the Go code does +// not need to know all the individual types +type IsLogEntry_L7 = isLogEntry_L7 diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.go new file mode 100644 index 0000000000..f5ebaac5ad --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.go @@ -0,0 +1,860 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v4.23.1 +// source: cilium/api/accesslog.proto + +package cilium + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type HttpProtocol int32 + +const ( + HttpProtocol_HTTP10 HttpProtocol = 0 + HttpProtocol_HTTP11 HttpProtocol = 1 + HttpProtocol_HTTP2 HttpProtocol = 2 +) + +// Enum value maps for HttpProtocol. +var ( + HttpProtocol_name = map[int32]string{ + 0: "HTTP10", + 1: "HTTP11", + 2: "HTTP2", + } + HttpProtocol_value = map[string]int32{ + "HTTP10": 0, + "HTTP11": 1, + "HTTP2": 2, + } +) + +func (x HttpProtocol) Enum() *HttpProtocol { + p := new(HttpProtocol) + *p = x + return p +} + +func (x HttpProtocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HttpProtocol) Descriptor() protoreflect.EnumDescriptor { + return file_cilium_api_accesslog_proto_enumTypes[0].Descriptor() +} + +func (HttpProtocol) Type() protoreflect.EnumType { + return &file_cilium_api_accesslog_proto_enumTypes[0] +} + +func (x HttpProtocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HttpProtocol.Descriptor instead. +func (HttpProtocol) EnumDescriptor() ([]byte, []int) { + return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{0} +} + +type EntryType int32 + +const ( + EntryType_Request EntryType = 0 + EntryType_Response EntryType = 1 + EntryType_Denied EntryType = 2 +) + +// Enum value maps for EntryType. +var ( + EntryType_name = map[int32]string{ + 0: "Request", + 1: "Response", + 2: "Denied", + } + EntryType_value = map[string]int32{ + "Request": 0, + "Response": 1, + "Denied": 2, + } +) + +func (x EntryType) Enum() *EntryType { + p := new(EntryType) + *p = x + return p +} + +func (x EntryType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EntryType) Descriptor() protoreflect.EnumDescriptor { + return file_cilium_api_accesslog_proto_enumTypes[1].Descriptor() +} + +func (EntryType) Type() protoreflect.EnumType { + return &file_cilium_api_accesslog_proto_enumTypes[1] +} + +func (x EntryType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EntryType.Descriptor instead. 
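+
+// In consumer code the enum values are compared directly; a tiny
+// hypothetical sketch:
+//
+//	if entry.GetEntryType() == EntryType_Denied {
+//		// the proxied request was denied by policy
+//	}
+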
+func (EntryType) EnumDescriptor() ([]byte, []int) { + return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{1} +} + +type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_accesslog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_accesslog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{0} +} + +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *KeyValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type HttpLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + HttpProtocol HttpProtocol `protobuf:"varint,1,opt,name=http_protocol,json=httpProtocol,proto3,enum=cilium.HttpProtocol" json:"http_protocol,omitempty"` + // Request info that is also retained for the response + Scheme string `protobuf:"bytes,2,opt,name=scheme,proto3" json:"scheme,omitempty"` // Envoy "x-forwarded-proto", e.g., "http", "https" + Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` // Envoy ":authority" header + Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` // Envoy ":path" header + Method string `protobuf:"bytes,5,opt,name=method,proto3" json:"method,omitempty"` // Envoy ":method" header + // Request or response headers not included above + Headers []*KeyValue `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty"` + // Response info + Status uint32 `protobuf:"varint,7,opt,name=status,proto3" json:"status,omitempty"` // Envoy ":status" header, zero for request + // missing_headers includes both headers that were added to the + // request, and headers that were merely logged as missing + MissingHeaders []*KeyValue `protobuf:"bytes,8,rep,name=missing_headers,json=missingHeaders,proto3" json:"missing_headers,omitempty"` + // rejected_headers includes headers that were flagged as unallowed, + // which may have been removed, or merely logged and the request still + // allowed, or the request may have been dropped due to them. 
+ RejectedHeaders []*KeyValue `protobuf:"bytes,9,rep,name=rejected_headers,json=rejectedHeaders,proto3" json:"rejected_headers,omitempty"` +} + +func (x *HttpLogEntry) Reset() { + *x = HttpLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_accesslog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpLogEntry) ProtoMessage() {} + +func (x *HttpLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_accesslog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpLogEntry.ProtoReflect.Descriptor instead. +func (*HttpLogEntry) Descriptor() ([]byte, []int) { + return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{1} +} + +func (x *HttpLogEntry) GetHttpProtocol() HttpProtocol { + if x != nil { + return x.HttpProtocol + } + return HttpProtocol_HTTP10 +} + +func (x *HttpLogEntry) GetScheme() string { + if x != nil { + return x.Scheme + } + return "" +} + +func (x *HttpLogEntry) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *HttpLogEntry) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *HttpLogEntry) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +func (x *HttpLogEntry) GetHeaders() []*KeyValue { + if x != nil { + return x.Headers + } + return nil +} + +func (x *HttpLogEntry) GetStatus() uint32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *HttpLogEntry) GetMissingHeaders() []*KeyValue { + if x != nil { + return x.MissingHeaders + } + return nil +} + +func (x *HttpLogEntry) GetRejectedHeaders() []*KeyValue { + if x != nil { + return x.RejectedHeaders + } + return nil +} + +type KafkaLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // correlation_id is a user-supplied integer value that will be passed + // back with the response + CorrelationId int32 `protobuf:"varint,1,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"` + // error_code is the Kafka error code being returned + // Ref. https://kafka.apache.org/protocol#protocol_error_codes + ErrorCode int32 `protobuf:"varint,2,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // api_version of the Kafka api used + // Ref. https://kafka.apache.org/protocol#protocol_compatibility + ApiVersion int32 `protobuf:"varint,3,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"` + // api_key for Kafka message + // Reference: https://kafka.apache.org/protocol#protocol_api_keys + ApiKey int32 `protobuf:"varint,4,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"` + // Topics of the request + // Optional, as not all messages have topics (ex. 
LeaveGroup, Heartbeat) + Topics []string `protobuf:"bytes,5,rep,name=topics,proto3" json:"topics,omitempty"` +} + +func (x *KafkaLogEntry) Reset() { + *x = KafkaLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_accesslog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KafkaLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KafkaLogEntry) ProtoMessage() {} + +func (x *KafkaLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_accesslog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KafkaLogEntry.ProtoReflect.Descriptor instead. +func (*KafkaLogEntry) Descriptor() ([]byte, []int) { + return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{2} +} + +func (x *KafkaLogEntry) GetCorrelationId() int32 { + if x != nil { + return x.CorrelationId + } + return 0 +} + +func (x *KafkaLogEntry) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *KafkaLogEntry) GetApiVersion() int32 { + if x != nil { + return x.ApiVersion + } + return 0 +} + +func (x *KafkaLogEntry) GetApiKey() int32 { + if x != nil { + return x.ApiKey + } + return 0 +} + +func (x *KafkaLogEntry) GetTopics() []string { + if x != nil { + return x.Topics + } + return nil +} + +type L7LogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Proto string `protobuf:"bytes,1,opt,name=proto,proto3" json:"proto,omitempty"` + Fields map[string]string `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *L7LogEntry) Reset() { + *x = L7LogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_accesslog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *L7LogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*L7LogEntry) ProtoMessage() {} + +func (x *L7LogEntry) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_accesslog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use L7LogEntry.ProtoReflect.Descriptor instead. +func (*L7LogEntry) Descriptor() ([]byte, []int) { + return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{3} +} + +func (x *L7LogEntry) GetProto() string { + if x != nil { + return x.Proto + } + return "" +} + +func (x *L7LogEntry) GetFields() map[string]string { + if x != nil { + return x.Fields + } + return nil +} + +type LogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The time that Cilium filter captured this log entry, + // in, nanoseconds since 1/1/1970. 
+ Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // 'true' if the request was received by an ingress listener, + // 'false' if received by an egress listener + IsIngress bool `protobuf:"varint,15,opt,name=is_ingress,json=isIngress,proto3" json:"is_ingress,omitempty"` + EntryType EntryType `protobuf:"varint,3,opt,name=entry_type,json=entryType,proto3,enum=cilium.EntryType" json:"entry_type,omitempty"` + // Cilium network policy resource name + PolicyName string `protobuf:"bytes,4,opt,name=policy_name,json=policyName,proto3" json:"policy_name,omitempty"` + // proxy_id identifies the listener this message relates to, + // as configured via the bpf_metadata listener filter + ProxyId uint32 `protobuf:"varint,17,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"` + // Cilium rule reference + CiliumRuleRef string `protobuf:"bytes,5,opt,name=cilium_rule_ref,json=ciliumRuleRef,proto3" json:"cilium_rule_ref,omitempty"` + // Cilium security ID of the source and destination + SourceSecurityId uint32 `protobuf:"varint,6,opt,name=source_security_id,json=sourceSecurityId,proto3" json:"source_security_id,omitempty"` + DestinationSecurityId uint32 `protobuf:"varint,16,opt,name=destination_security_id,json=destinationSecurityId,proto3" json:"destination_security_id,omitempty"` + // These fields record the original source and destination addresses, + // stored in ipv4:port or [ipv6]:port format. + SourceAddress string `protobuf:"bytes,7,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"` + DestinationAddress string `protobuf:"bytes,8,opt,name=destination_address,json=destinationAddress,proto3" json:"destination_address,omitempty"` + // Types that are assignable to L7: + // + // *LogEntry_Http + // *LogEntry_Kafka + // *LogEntry_GenericL7 + L7 isLogEntry_L7 `protobuf_oneof:"l7"` +} + +func (x *LogEntry) Reset() { + *x = LogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_accesslog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogEntry) ProtoMessage() {} + +func (x *LogEntry) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_accesslog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogEntry.ProtoReflect.Descriptor instead. 
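+
+// The generated getters below are nil-safe, so a hypothetical consumer can
+// probe the L7 oneof without a type switch:
+//
+//	if http := entry.GetHttp(); http != nil {
+//		_ = http.GetPath()
+//	} else if kafka := entry.GetKafka(); kafka != nil {
+//		_ = kafka.GetTopics()
+//	}
+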
+func (*LogEntry) Descriptor() ([]byte, []int) { + return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{4} +} + +func (x *LogEntry) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *LogEntry) GetIsIngress() bool { + if x != nil { + return x.IsIngress + } + return false +} + +func (x *LogEntry) GetEntryType() EntryType { + if x != nil { + return x.EntryType + } + return EntryType_Request +} + +func (x *LogEntry) GetPolicyName() string { + if x != nil { + return x.PolicyName + } + return "" +} + +func (x *LogEntry) GetProxyId() uint32 { + if x != nil { + return x.ProxyId + } + return 0 +} + +func (x *LogEntry) GetCiliumRuleRef() string { + if x != nil { + return x.CiliumRuleRef + } + return "" +} + +func (x *LogEntry) GetSourceSecurityId() uint32 { + if x != nil { + return x.SourceSecurityId + } + return 0 +} + +func (x *LogEntry) GetDestinationSecurityId() uint32 { + if x != nil { + return x.DestinationSecurityId + } + return 0 +} + +func (x *LogEntry) GetSourceAddress() string { + if x != nil { + return x.SourceAddress + } + return "" +} + +func (x *LogEntry) GetDestinationAddress() string { + if x != nil { + return x.DestinationAddress + } + return "" +} + +func (m *LogEntry) GetL7() isLogEntry_L7 { + if m != nil { + return m.L7 + } + return nil +} + +func (x *LogEntry) GetHttp() *HttpLogEntry { + if x, ok := x.GetL7().(*LogEntry_Http); ok { + return x.Http + } + return nil +} + +func (x *LogEntry) GetKafka() *KafkaLogEntry { + if x, ok := x.GetL7().(*LogEntry_Kafka); ok { + return x.Kafka + } + return nil +} + +func (x *LogEntry) GetGenericL7() *L7LogEntry { + if x, ok := x.GetL7().(*LogEntry_GenericL7); ok { + return x.GenericL7 + } + return nil +} + +type isLogEntry_L7 interface { + isLogEntry_L7() +} + +type LogEntry_Http struct { + Http *HttpLogEntry `protobuf:"bytes,100,opt,name=http,proto3,oneof"` +} + +type LogEntry_Kafka struct { + Kafka *KafkaLogEntry `protobuf:"bytes,101,opt,name=kafka,proto3,oneof"` +} + +type LogEntry_GenericL7 struct { + GenericL7 *L7LogEntry `protobuf:"bytes,102,opt,name=generic_l7,json=genericL7,proto3,oneof"` +} + +func (*LogEntry_Http) isLogEntry_L7() {} + +func (*LogEntry_Kafka) isLogEntry_L7() {} + +func (*LogEntry_GenericL7) isLogEntry_L7() {} + +var File_cilium_api_accesslog_proto protoreflect.FileDescriptor + +var file_cilium_api_accesslog_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x69, + 0x6c, 0x69, 0x75, 0x6d, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xdd, 0x02, 0x0a, 0x0c, 0x48, 0x74, 0x74, + 0x70, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x39, 0x0a, 0x0d, 0x68, 0x74, 0x74, + 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x14, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0c, 0x68, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 
0x12, 0x0a, 0x04, + 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x2a, 0x0a, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x39, 0x0a, 0x0f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x69, 0x6c, 0x69, + 0x75, 0x6d, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6e, 0x67, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x3b, 0x0a, 0x10, 0x72, + 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4b, + 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x0d, 0x4b, 0x61, 0x66, + 0x6b, 0x61, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, + 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0d, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x06, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x73, 0x22, 0x95, 0x01, 0x0a, 0x0a, 0x4c, 0x37, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x36, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, + 0x2e, 0x4c, 0x37, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, + 0x39, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x04, 0x0a, 0x08, 0x4c, + 0x6f, 
0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x67, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x49, 0x6e, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, + 0x6d, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x6e, 0x74, + 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x5f, 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x5f, 0x72, 0x75, 0x6c, + 0x65, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x69, 0x6c, + 0x69, 0x75, 0x6d, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x66, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x17, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x5f, 0x69, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x49, 0x64, + 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2a, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, + 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x48, 0x00, 0x52, 0x04, + 0x68, 0x74, 0x74, 0x70, 0x12, 0x2d, 0x0a, 0x05, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x18, 0x65, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4b, 0x61, 0x66, + 0x6b, 0x61, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x48, 0x00, 0x52, 0x05, 0x6b, 0x61, + 0x66, 0x6b, 0x61, 0x12, 0x33, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x6c, + 0x37, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, + 0x2e, 0x4c, 0x37, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4c, 0x37, 0x42, 0x04, 0x0a, 0x02, 0x6c, 0x37, 0x2a, 0x31, + 0x0a, 0x0c, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0a, + 0x0a, 0x06, 0x48, 0x54, 0x54, 
0x50, 0x31, 0x30, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x54, + 0x54, 0x50, 0x31, 0x31, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, + 0x02, 0x2a, 0x32, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, + 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x65, 0x6e, + 0x69, 0x65, 0x64, 0x10, 0x02, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x63, + 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cilium_api_accesslog_proto_rawDescOnce sync.Once + file_cilium_api_accesslog_proto_rawDescData = file_cilium_api_accesslog_proto_rawDesc +) + +func file_cilium_api_accesslog_proto_rawDescGZIP() []byte { + file_cilium_api_accesslog_proto_rawDescOnce.Do(func() { + file_cilium_api_accesslog_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_accesslog_proto_rawDescData) + }) + return file_cilium_api_accesslog_proto_rawDescData +} + +var file_cilium_api_accesslog_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_cilium_api_accesslog_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_cilium_api_accesslog_proto_goTypes = []interface{}{ + (HttpProtocol)(0), // 0: cilium.HttpProtocol + (EntryType)(0), // 1: cilium.EntryType + (*KeyValue)(nil), // 2: cilium.KeyValue + (*HttpLogEntry)(nil), // 3: cilium.HttpLogEntry + (*KafkaLogEntry)(nil), // 4: cilium.KafkaLogEntry + (*L7LogEntry)(nil), // 5: cilium.L7LogEntry + (*LogEntry)(nil), // 6: cilium.LogEntry + nil, // 7: cilium.L7LogEntry.FieldsEntry +} +var file_cilium_api_accesslog_proto_depIdxs = []int32{ + 0, // 0: cilium.HttpLogEntry.http_protocol:type_name -> cilium.HttpProtocol + 2, // 1: cilium.HttpLogEntry.headers:type_name -> cilium.KeyValue + 2, // 2: cilium.HttpLogEntry.missing_headers:type_name -> cilium.KeyValue + 2, // 3: cilium.HttpLogEntry.rejected_headers:type_name -> cilium.KeyValue + 7, // 4: cilium.L7LogEntry.fields:type_name -> cilium.L7LogEntry.FieldsEntry + 1, // 5: cilium.LogEntry.entry_type:type_name -> cilium.EntryType + 3, // 6: cilium.LogEntry.http:type_name -> cilium.HttpLogEntry + 4, // 7: cilium.LogEntry.kafka:type_name -> cilium.KafkaLogEntry + 5, // 8: cilium.LogEntry.generic_l7:type_name -> cilium.L7LogEntry + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_cilium_api_accesslog_proto_init() } +func file_cilium_api_accesslog_proto_init() { + if File_cilium_api_accesslog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cilium_api_accesslog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_accesslog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return 
nil + } + } + file_cilium_api_accesslog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KafkaLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_accesslog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*L7LogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_accesslog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cilium_api_accesslog_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*LogEntry_Http)(nil), + (*LogEntry_Kafka)(nil), + (*LogEntry_GenericL7)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cilium_api_accesslog_proto_rawDesc, + NumEnums: 2, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cilium_api_accesslog_proto_goTypes, + DependencyIndexes: file_cilium_api_accesslog_proto_depIdxs, + EnumInfos: file_cilium_api_accesslog_proto_enumTypes, + MessageInfos: file_cilium_api_accesslog_proto_msgTypes, + }.Build() + File_cilium_api_accesslog_proto = out.File + file_cilium_api_accesslog_proto_rawDesc = nil + file_cilium_api_accesslog_proto_goTypes = nil + file_cilium_api_accesslog_proto_depIdxs = nil +} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.validate.go new file mode 100644 index 0000000000..3a9c5b417a --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.validate.go @@ -0,0 +1,810 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: cilium/api/accesslog.proto + +package cilium + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on KeyValue with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *KeyValue) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KeyValue with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in KeyValueMultiError, or nil +// if none found. 
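+//
+// A hypothetical caller sketch (errors.As is from the standard library):
+//
+//	if err := m.ValidateAll(); err != nil {
+//		var multi KeyValueMultiError
+//		if errors.As(err, &multi) {
+//			for _, violation := range multi.AllErrors() {
+//				_ = violation // handle each violation individually
+//			}
+//		}
+//	}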
+func (m *KeyValue) ValidateAll() error { + return m.validate(true) +} + +func (m *KeyValue) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + // no validation rules for Value + + if len(errors) > 0 { + return KeyValueMultiError(errors) + } + + return nil +} + +// KeyValueMultiError is an error wrapping multiple validation errors returned +// by KeyValue.ValidateAll() if the designated constraints aren't met. +type KeyValueMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m KeyValueMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KeyValueMultiError) AllErrors() []error { return m } + +// KeyValueValidationError is the validation error returned by +// KeyValue.Validate if the designated constraints aren't met. +type KeyValueValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e KeyValueValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e KeyValueValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KeyValueValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e KeyValueValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e KeyValueValidationError) ErrorName() string { return "KeyValueValidationError" } + +// Error satisfies the builtin error interface +func (e KeyValueValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKeyValue.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KeyValueValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KeyValueValidationError{} + +// Validate checks the field values on HttpLogEntry with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpLogEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpLogEntry with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HttpLogEntryMultiError, or +// nil if none found. 
+func (m *HttpLogEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpLogEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for HttpProtocol + + // no validation rules for Scheme + + // no validation rules for Host + + // no validation rules for Path + + // no validation rules for Method + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpLogEntryValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpLogEntryValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpLogEntryValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for Status + + for idx, item := range m.GetMissingHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpLogEntryValidationError{ + field: fmt.Sprintf("MissingHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpLogEntryValidationError{ + field: fmt.Sprintf("MissingHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpLogEntryValidationError{ + field: fmt.Sprintf("MissingHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRejectedHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpLogEntryValidationError{ + field: fmt.Sprintf("RejectedHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpLogEntryValidationError{ + field: fmt.Sprintf("RejectedHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpLogEntryValidationError{ + field: fmt.Sprintf("RejectedHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpLogEntryMultiError(errors) + } + + return nil +} + +// HttpLogEntryMultiError is an error wrapping multiple validation errors +// returned by HttpLogEntry.ValidateAll() if the designated constraints aren't met. +type HttpLogEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m HttpLogEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpLogEntryMultiError) AllErrors() []error { return m } + +// HttpLogEntryValidationError is the validation error returned by +// HttpLogEntry.Validate if the designated constraints aren't met. +type HttpLogEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpLogEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpLogEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpLogEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpLogEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpLogEntryValidationError) ErrorName() string { return "HttpLogEntryValidationError" } + +// Error satisfies the builtin error interface +func (e HttpLogEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpLogEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpLogEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpLogEntryValidationError{} + +// Validate checks the field values on KafkaLogEntry with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *KafkaLogEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KafkaLogEntry with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in KafkaLogEntryMultiError, or +// nil if none found. +func (m *KafkaLogEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *KafkaLogEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for CorrelationId + + // no validation rules for ErrorCode + + // no validation rules for ApiVersion + + // no validation rules for ApiKey + + if len(errors) > 0 { + return KafkaLogEntryMultiError(errors) + } + + return nil +} + +// KafkaLogEntryMultiError is an error wrapping multiple validation errors +// returned by KafkaLogEntry.ValidateAll() if the designated constraints +// aren't met. +type KafkaLogEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m KafkaLogEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KafkaLogEntryMultiError) AllErrors() []error { return m } + +// KafkaLogEntryValidationError is the validation error returned by +// KafkaLogEntry.Validate if the designated constraints aren't met. +type KafkaLogEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e KafkaLogEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e KafkaLogEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KafkaLogEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e KafkaLogEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e KafkaLogEntryValidationError) ErrorName() string { return "KafkaLogEntryValidationError" } + +// Error satisfies the builtin error interface +func (e KafkaLogEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKafkaLogEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KafkaLogEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KafkaLogEntryValidationError{} + +// Validate checks the field values on L7LogEntry with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *L7LogEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on L7LogEntry with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in L7LogEntryMultiError, or +// nil if none found. +func (m *L7LogEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *L7LogEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Proto + + // no validation rules for Fields + + if len(errors) > 0 { + return L7LogEntryMultiError(errors) + } + + return nil +} + +// L7LogEntryMultiError is an error wrapping multiple validation errors +// returned by L7LogEntry.ValidateAll() if the designated constraints aren't met. +type L7LogEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m L7LogEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m L7LogEntryMultiError) AllErrors() []error { return m } + +// L7LogEntryValidationError is the validation error returned by +// L7LogEntry.Validate if the designated constraints aren't met. +type L7LogEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e L7LogEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e L7LogEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e L7LogEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e L7LogEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e L7LogEntryValidationError) ErrorName() string { return "L7LogEntryValidationError" } + +// Error satisfies the builtin error interface +func (e L7LogEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sL7LogEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = L7LogEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = L7LogEntryValidationError{} + +// Validate checks the field values on LogEntry with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LogEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LogEntry with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LogEntryMultiError, or nil +// if none found. +func (m *LogEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *LogEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Timestamp + + // no validation rules for IsIngress + + // no validation rules for EntryType + + // no validation rules for PolicyName + + // no validation rules for ProxyId + + // no validation rules for CiliumRuleRef + + // no validation rules for SourceSecurityId + + // no validation rules for DestinationSecurityId + + // no validation rules for SourceAddress + + // no validation rules for DestinationAddress + + switch v := m.L7.(type) { + case *LogEntry_Http: + if v == nil { + err := LogEntryValidationError{ + field: "L7", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetHttp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LogEntryValidationError{ + field: "Http", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LogEntryValidationError{ + field: "Http", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LogEntryValidationError{ + field: "Http", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *LogEntry_Kafka: + if v == nil { + err := LogEntryValidationError{ + field: "L7", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetKafka()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LogEntryValidationError{ + field: "Kafka", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LogEntryValidationError{ + field: "Kafka", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKafka()).(interface{ 
Validate() error }); ok { + if err := v.Validate(); err != nil { + return LogEntryValidationError{ + field: "Kafka", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *LogEntry_GenericL7: + if v == nil { + err := LogEntryValidationError{ + field: "L7", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGenericL7()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LogEntryValidationError{ + field: "GenericL7", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LogEntryValidationError{ + field: "GenericL7", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGenericL7()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LogEntryValidationError{ + field: "GenericL7", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return LogEntryMultiError(errors) + } + + return nil +} + +// LogEntryMultiError is an error wrapping multiple validation errors returned +// by LogEntry.ValidateAll() if the designated constraints aren't met. +type LogEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LogEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LogEntryMultiError) AllErrors() []error { return m } + +// LogEntryValidationError is the validation error returned by +// LogEntry.Validate if the designated constraints aren't met. +type LogEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LogEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LogEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LogEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LogEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LogEntryValidationError) ErrorName() string { return "LogEntryValidationError" } + +// Error satisfies the builtin error interface +func (e LogEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLogEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LogEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LogEntryValidationError{} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.go new file mode 100644 index 0000000000..a8a62eec9c --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.go @@ -0,0 +1,250 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: cilium/api/bpf_metadata.proto
+
+package cilium
+
+import (
+	proto "github.com/golang/protobuf/proto"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type BpfMetadata struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// File system root for bpf. Defaults to "/sys/fs/bpf" if left empty.
+	BpfRoot string `protobuf:"bytes,1,opt,name=bpf_root,json=bpfRoot,proto3" json:"bpf_root,omitempty"`
+	// 'true' if the filter is on ingress listener, 'false' for egress listener.
+	IsIngress bool `protobuf:"varint,2,opt,name=is_ingress,json=isIngress,proto3" json:"is_ingress,omitempty"`
+	// Use of the original source address requires kernel datapath support which
+	// may or may not be available. 'true' if original source address
+	// should be used. Original source address use may still be
+	// skipped in scenarios where it is known not to work.
+	UseOriginalSourceAddress bool `protobuf:"varint,3,opt,name=use_original_source_address,json=useOriginalSourceAddress,proto3" json:"use_original_source_address,omitempty"`
+	// True if the listener is used for an L7 LB. In this case policy enforcement is done on the
+	// destination selected by the listener rather than on the original destination address. For
+	// local sources the source endpoint ID is set in socket mark instead of source security ID if
+	// 'use_original_source_address' is also true, so that the local source's egress policy is
+	// enforced on the bpf datapath.
+	// Only valid for egress.
+	IsL7Lb bool `protobuf:"varint,4,opt,name=is_l7lb,json=isL7lb,proto3" json:"is_l7lb,omitempty"`
+	// Source address to be used whenever the original source address is not used.
+	// Either ipv4_source_address or ipv6_source_address depending on the address
+	// family of the destination address. If left empty, and no Envoy Cluster Bind
+	// Config is provided, the source address will be picked by the local IP stack.
+	Ipv4SourceAddress string `protobuf:"bytes,5,opt,name=ipv4_source_address,json=ipv4SourceAddress,proto3" json:"ipv4_source_address,omitempty"`
+	Ipv6SourceAddress string `protobuf:"bytes,6,opt,name=ipv6_source_address,json=ipv6SourceAddress,proto3" json:"ipv6_source_address,omitempty"`
+	// True if policy should be enforced when the L7 LB is used. The policy bound to the configured
+	// ipv[46]_source_addresses, which must be explicitly set, applies. Ingress policy is
+	// enforced on the security identity of the original (e.g., external) source. Egress
+	// policy is enforced on the security identity of the backend selected by the load balancer.
+	//
+	// Deprecation note: This option will be forced 'true' and deprecated when Cilium 1.15 is
+	// the oldest supported release.
+ EnforcePolicyOnL7Lb bool `protobuf:"varint,7,opt,name=enforce_policy_on_l7lb,json=enforcePolicyOnL7lb,proto3" json:"enforce_policy_on_l7lb,omitempty"` + // proxy_id is passed to access log messages and allows relating access log messages to + // listeners. + ProxyId uint32 `protobuf:"varint,8,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"` +} + +func (x *BpfMetadata) Reset() { + *x = BpfMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_bpf_metadata_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BpfMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BpfMetadata) ProtoMessage() {} + +func (x *BpfMetadata) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_bpf_metadata_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BpfMetadata.ProtoReflect.Descriptor instead. +func (*BpfMetadata) Descriptor() ([]byte, []int) { + return file_cilium_api_bpf_metadata_proto_rawDescGZIP(), []int{0} +} + +func (x *BpfMetadata) GetBpfRoot() string { + if x != nil { + return x.BpfRoot + } + return "" +} + +func (x *BpfMetadata) GetIsIngress() bool { + if x != nil { + return x.IsIngress + } + return false +} + +func (x *BpfMetadata) GetUseOriginalSourceAddress() bool { + if x != nil { + return x.UseOriginalSourceAddress + } + return false +} + +func (x *BpfMetadata) GetIsL7Lb() bool { + if x != nil { + return x.IsL7Lb + } + return false +} + +func (x *BpfMetadata) GetIpv4SourceAddress() string { + if x != nil { + return x.Ipv4SourceAddress + } + return "" +} + +func (x *BpfMetadata) GetIpv6SourceAddress() string { + if x != nil { + return x.Ipv6SourceAddress + } + return "" +} + +func (x *BpfMetadata) GetEnforcePolicyOnL7Lb() bool { + if x != nil { + return x.EnforcePolicyOnL7Lb + } + return false +} + +func (x *BpfMetadata) GetProxyId() uint32 { + if x != nil { + return x.ProxyId + } + return 0 +} + +var File_cilium_api_bpf_metadata_proto protoreflect.FileDescriptor + +var file_cilium_api_bpf_metadata_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x62, 0x70, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x06, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x22, 0xcf, 0x02, 0x0a, 0x0b, 0x42, 0x70, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x70, 0x66, 0x5f, 0x72, + 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x70, 0x66, 0x52, 0x6f, + 0x6f, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x75, 0x73, 0x65, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, + 0x6c, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x75, 0x73, 0x65, 0x4f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x61, 0x6c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x17, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x37, 0x6c, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x37, 0x6c, 0x62, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x70, 
0x76, + 0x34, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x69, 0x70, 0x76, 0x34, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x70, 0x76, + 0x36, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x69, 0x70, 0x76, 0x36, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, 0x16, 0x65, 0x6e, 0x66, + 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6f, 0x6e, 0x5f, 0x6c, + 0x37, 0x6c, 0x62, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x65, 0x6e, 0x66, 0x6f, 0x72, + 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x6e, 0x4c, 0x37, 0x6c, 0x62, 0x12, 0x19, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x07, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x49, 0x64, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, + 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cilium_api_bpf_metadata_proto_rawDescOnce sync.Once + file_cilium_api_bpf_metadata_proto_rawDescData = file_cilium_api_bpf_metadata_proto_rawDesc +) + +func file_cilium_api_bpf_metadata_proto_rawDescGZIP() []byte { + file_cilium_api_bpf_metadata_proto_rawDescOnce.Do(func() { + file_cilium_api_bpf_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_bpf_metadata_proto_rawDescData) + }) + return file_cilium_api_bpf_metadata_proto_rawDescData +} + +var file_cilium_api_bpf_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_cilium_api_bpf_metadata_proto_goTypes = []interface{}{ + (*BpfMetadata)(nil), // 0: cilium.BpfMetadata +} +var file_cilium_api_bpf_metadata_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_cilium_api_bpf_metadata_proto_init() } +func file_cilium_api_bpf_metadata_proto_init() { + if File_cilium_api_bpf_metadata_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cilium_api_bpf_metadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BpfMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cilium_api_bpf_metadata_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cilium_api_bpf_metadata_proto_goTypes, + DependencyIndexes: file_cilium_api_bpf_metadata_proto_depIdxs, + MessageInfos: file_cilium_api_bpf_metadata_proto_msgTypes, + }.Build() + File_cilium_api_bpf_metadata_proto = out.File + file_cilium_api_bpf_metadata_proto_rawDesc = nil + file_cilium_api_bpf_metadata_proto_goTypes = nil + file_cilium_api_bpf_metadata_proto_depIdxs = nil +} diff --git 
a/vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.validate.go new file mode 100644 index 0000000000..2c3fff7c35 --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.validate.go @@ -0,0 +1,151 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: cilium/api/bpf_metadata.proto + +package cilium + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on BpfMetadata with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *BpfMetadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on BpfMetadata with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in BpfMetadataMultiError, or +// nil if none found. +func (m *BpfMetadata) ValidateAll() error { + return m.validate(true) +} + +func (m *BpfMetadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for BpfRoot + + // no validation rules for IsIngress + + // no validation rules for UseOriginalSourceAddress + + // no validation rules for IsL7Lb + + // no validation rules for Ipv4SourceAddress + + // no validation rules for Ipv6SourceAddress + + // no validation rules for EnforcePolicyOnL7Lb + + // no validation rules for ProxyId + + if len(errors) > 0 { + return BpfMetadataMultiError(errors) + } + + return nil +} + +// BpfMetadataMultiError is an error wrapping multiple validation errors +// returned by BpfMetadata.ValidateAll() if the designated constraints aren't met. +type BpfMetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m BpfMetadataMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BpfMetadataMultiError) AllErrors() []error { return m } + +// BpfMetadataValidationError is the validation error returned by +// BpfMetadata.Validate if the designated constraints aren't met. +type BpfMetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e BpfMetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BpfMetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BpfMetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e BpfMetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e BpfMetadataValidationError) ErrorName() string { return "BpfMetadataValidationError" }
+
+// Error satisfies the builtin error interface
+func (e BpfMetadataValidationError) Error() string {
+	cause := ""
+	if e.cause != nil {
+		cause = fmt.Sprintf(" | caused by: %v", e.cause)
+	}
+
+	key := ""
+	if e.key {
+		key = "key for "
+	}
+
+	return fmt.Sprintf(
+		"invalid %sBpfMetadata.%s: %s%s",
+		key,
+		e.field,
+		e.reason,
+		cause)
+}
+
+var _ error = BpfMetadataValidationError{}
+
+var _ interface {
+	Field() string
+	Reason() string
+	Key() bool
+	Cause() error
+	ErrorName() string
+} = BpfMetadataValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.go
new file mode 100644
index 0000000000..f96b0168db
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.go
@@ -0,0 +1,157 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: cilium/api/health_check_sink.proto
+
+package cilium
+
+import (
+	_ "github.com/envoyproxy/protoc-gen-validate/validate"
+	proto "github.com/golang/protobuf/proto"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Health check event pipe sink.
+// The health check event will be streamed as binary protobufs.
+type HealthCheckEventPipeSink struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Path of the Unix domain socket to connect to for sending health check events.
+	Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+}
+
+func (x *HealthCheckEventPipeSink) Reset() {
+	*x = HealthCheckEventPipeSink{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_cilium_api_health_check_sink_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *HealthCheckEventPipeSink) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckEventPipeSink) ProtoMessage() {}
+
+func (x *HealthCheckEventPipeSink) ProtoReflect() protoreflect.Message {
+	mi := &file_cilium_api_health_check_sink_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckEventPipeSink.ProtoReflect.Descriptor instead.
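The companion .pb.validate.go file below enforces the single constraint on this message: path must be at least one rune long. A minimal sketch of how a consumer might exercise that rule, assuming the vendored import path used in this tree; the socket path is illustrative:

package main

import (
	"fmt"

	cilium "github.com/cilium/proxy/go/cilium/api"
)

func main() {
	// An empty path trips the min-length rule in the generated validate code.
	sink := &cilium.HealthCheckEventPipeSink{}
	fmt.Println(sink.Validate()) // invalid HealthCheckEventPipeSink.Path: value length must be at least 1 runes

	// An illustrative socket path satisfies the rule.
	sink.Path = "/var/run/cilium/health-events.sock"
	fmt.Println(sink.Validate()) // <nil>
}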
+func (*HealthCheckEventPipeSink) Descriptor() ([]byte, []int) { + return file_cilium_api_health_check_sink_proto_rawDescGZIP(), []int{0} +} + +func (x *HealthCheckEventPipeSink) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +var File_cilium_api_health_check_sink_proto protoreflect.FileDescriptor + +var file_cilium_api_health_check_sink_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x69, 0x6e, 0x6b, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x1a, 0x17, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x37, 0x0a, 0x18, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x69, 0x70, 0x65, 0x53, 0x69, 0x6e, + 0x6b, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x2e, + 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, + 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x69, 0x6c, + 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cilium_api_health_check_sink_proto_rawDescOnce sync.Once + file_cilium_api_health_check_sink_proto_rawDescData = file_cilium_api_health_check_sink_proto_rawDesc +) + +func file_cilium_api_health_check_sink_proto_rawDescGZIP() []byte { + file_cilium_api_health_check_sink_proto_rawDescOnce.Do(func() { + file_cilium_api_health_check_sink_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_health_check_sink_proto_rawDescData) + }) + return file_cilium_api_health_check_sink_proto_rawDescData +} + +var file_cilium_api_health_check_sink_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_cilium_api_health_check_sink_proto_goTypes = []interface{}{ + (*HealthCheckEventPipeSink)(nil), // 0: cilium.HealthCheckEventPipeSink +} +var file_cilium_api_health_check_sink_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_cilium_api_health_check_sink_proto_init() } +func file_cilium_api_health_check_sink_proto_init() { + if File_cilium_api_health_check_sink_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cilium_api_health_check_sink_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheckEventPipeSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cilium_api_health_check_sink_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cilium_api_health_check_sink_proto_goTypes, + DependencyIndexes: file_cilium_api_health_check_sink_proto_depIdxs, + MessageInfos: 
file_cilium_api_health_check_sink_proto_msgTypes, + }.Build() + File_cilium_api_health_check_sink_proto = out.File + file_cilium_api_health_check_sink_proto_rawDesc = nil + file_cilium_api_health_check_sink_proto_goTypes = nil + file_cilium_api_health_check_sink_proto_depIdxs = nil +} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.validate.go new file mode 100644 index 0000000000..5931df1ba5 --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.validate.go @@ -0,0 +1,149 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: cilium/api/health_check_sink.proto + +package cilium + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HealthCheckEventPipeSink with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheckEventPipeSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheckEventPipeSink with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthCheckEventPipeSinkMultiError, or nil if none found. +func (m *HealthCheckEventPipeSink) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheckEventPipeSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetPath()) < 1 { + err := HealthCheckEventPipeSinkValidationError{ + field: "Path", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HealthCheckEventPipeSinkMultiError(errors) + } + + return nil +} + +// HealthCheckEventPipeSinkMultiError is an error wrapping multiple validation +// errors returned by HealthCheckEventPipeSink.ValidateAll() if the designated +// constraints aren't met. +type HealthCheckEventPipeSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheckEventPipeSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthCheckEventPipeSinkMultiError) AllErrors() []error { return m } + +// HealthCheckEventPipeSinkValidationError is the validation error returned by +// HealthCheckEventPipeSink.Validate if the designated constraints aren't met. +type HealthCheckEventPipeSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheckEventPipeSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e HealthCheckEventPipeSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheckEventPipeSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheckEventPipeSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthCheckEventPipeSinkValidationError) ErrorName() string { + return "HealthCheckEventPipeSinkValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheckEventPipeSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheckEventPipeSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheckEventPipeSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheckEventPipeSinkValidationError{} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.go new file mode 100644 index 0000000000..eccf771091 --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v4.23.1 +// source: cilium/api/l7policy.proto + +package cilium + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + _ "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type L7Policy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Path to the unix domain socket for the cilium access log. + AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"` + // HTTP response body message for 403 status code. + // If empty, "Access denied" will be used. + Denied_403Body string `protobuf:"bytes,3,opt,name=denied_403_body,json=denied403Body,proto3" json:"denied_403_body,omitempty"` +} + +func (x *L7Policy) Reset() { + *x = L7Policy{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_l7policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *L7Policy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*L7Policy) ProtoMessage() {} + +func (x *L7Policy) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_l7policy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use L7Policy.ProtoReflect.Descriptor instead. 
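All getters generated in these files follow the same nil-guarded pattern, so they are safe to call on a nil message. A short sketch under that assumption; the access log path and body text are illustrative values, not defaults:

package main

import (
	"fmt"

	cilium "github.com/cilium/proxy/go/cilium/api"
)

func main() {
	// Getters are nil-safe: a nil *L7Policy yields zero values.
	var nilPolicy *cilium.L7Policy
	fmt.Printf("%q\n", nilPolicy.GetAccessLogPath()) // ""

	p := &cilium.L7Policy{
		AccessLogPath:  "/var/run/cilium/access_log.sock", // illustrative path
		Denied_403Body: "request denied by network policy",
	}
	fmt.Println(p.GetDenied_403Body())
}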
+func (*L7Policy) Descriptor() ([]byte, []int) { + return file_cilium_api_l7policy_proto_rawDescGZIP(), []int{0} +} + +func (x *L7Policy) GetAccessLogPath() string { + if x != nil { + return x.AccessLogPath + } + return "" +} + +func (x *L7Policy) GetDenied_403Body() string { + if x != nil { + return x.Denied_403Body + } + return "" +} + +var File_cilium_api_l7policy_proto protoreflect.FileDescriptor + +var file_cilium_api_l7policy_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x37, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x69, 0x6c, + 0x69, 0x75, 0x6d, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x5a, 0x0a, 0x08, 0x4c, 0x37, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x26, 0x0a, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x65, 0x6e, 0x69, 0x65, + 0x64, 0x5f, 0x34, 0x30, 0x33, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x34, 0x30, 0x33, 0x42, 0x6f, 0x64, 0x79, 0x42, + 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, + 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x69, + 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cilium_api_l7policy_proto_rawDescOnce sync.Once + file_cilium_api_l7policy_proto_rawDescData = file_cilium_api_l7policy_proto_rawDesc +) + +func file_cilium_api_l7policy_proto_rawDescGZIP() []byte { + file_cilium_api_l7policy_proto_rawDescOnce.Do(func() { + file_cilium_api_l7policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_l7policy_proto_rawDescData) + }) + return file_cilium_api_l7policy_proto_rawDescData +} + +var file_cilium_api_l7policy_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_cilium_api_l7policy_proto_goTypes = []interface{}{ + (*L7Policy)(nil), // 0: cilium.L7Policy +} +var file_cilium_api_l7policy_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_cilium_api_l7policy_proto_init() } +func file_cilium_api_l7policy_proto_init() { + if File_cilium_api_l7policy_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cilium_api_l7policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*L7Policy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cilium_api_l7policy_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cilium_api_l7policy_proto_goTypes, + DependencyIndexes: 
file_cilium_api_l7policy_proto_depIdxs, + MessageInfos: file_cilium_api_l7policy_proto_msgTypes, + }.Build() + File_cilium_api_l7policy_proto = out.File + file_cilium_api_l7policy_proto_rawDesc = nil + file_cilium_api_l7policy_proto_goTypes = nil + file_cilium_api_l7policy_proto_depIdxs = nil +} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.validate.go new file mode 100644 index 0000000000..a9b1cc932c --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.validate.go @@ -0,0 +1,139 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: cilium/api/l7policy.proto + +package cilium + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on L7Policy with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *L7Policy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on L7Policy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in L7PolicyMultiError, or nil +// if none found. +func (m *L7Policy) ValidateAll() error { + return m.validate(true) +} + +func (m *L7Policy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for AccessLogPath + + // no validation rules for Denied_403Body + + if len(errors) > 0 { + return L7PolicyMultiError(errors) + } + + return nil +} + +// L7PolicyMultiError is an error wrapping multiple validation errors returned +// by L7Policy.ValidateAll() if the designated constraints aren't met. +type L7PolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m L7PolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m L7PolicyMultiError) AllErrors() []error { return m } + +// L7PolicyValidationError is the validation error returned by +// L7Policy.Validate if the designated constraints aren't met. +type L7PolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e L7PolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e L7PolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e L7PolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e L7PolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e L7PolicyValidationError) ErrorName() string { return "L7PolicyValidationError" } + +// Error satisfies the builtin error interface +func (e L7PolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sL7Policy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = L7PolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = L7PolicyValidationError{} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.go new file mode 100644 index 0000000000..4868b7f632 --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.go @@ -0,0 +1,183 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v4.23.1 +// source: cilium/api/network_filter.proto + +package cilium + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type NetworkFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Path to the proxylib to be opened + Proxylib string `protobuf:"bytes,1,opt,name=proxylib,proto3" json:"proxylib,omitempty"` + // Transparent set of parameters provided for proxylib initialization + ProxylibParams map[string]string `protobuf:"bytes,2,rep,name=proxylib_params,json=proxylibParams,proto3" json:"proxylib_params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Path to the unix domain socket for the cilium access log. + AccessLogPath string `protobuf:"bytes,5,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"` +} + +func (x *NetworkFilter) Reset() { + *x = NetworkFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_network_filter_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkFilter) ProtoMessage() {} + +func (x *NetworkFilter) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_network_filter_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkFilter.ProtoReflect.Descriptor instead. 
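The proto3 map field proxylib_params surfaces as an ordinary Go map on the generated struct, and its contents are opaque to the filter itself, passed through to the proxylib at initialization. A hedged sketch; the parameter key and paths are illustrative, not defined parameter names:

package main

import (
	"fmt"

	cilium "github.com/cilium/proxy/go/cilium/api"
)

func main() {
	f := &cilium.NetworkFilter{
		Proxylib: "libcilium.so",
		// Opaque initialization parameters handed through to the proxylib;
		// the key below is illustrative only.
		ProxylibParams: map[string]string{"log-level": "debug"},
		AccessLogPath:  "/var/run/cilium/access_log.sock",
	}
	// network_filter.proto declares no field constraints, so this prints <nil>.
	fmt.Println(f.Validate())
}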
+func (*NetworkFilter) Descriptor() ([]byte, []int) { + return file_cilium_api_network_filter_proto_rawDescGZIP(), []int{0} +} + +func (x *NetworkFilter) GetProxylib() string { + if x != nil { + return x.Proxylib + } + return "" +} + +func (x *NetworkFilter) GetProxylibParams() map[string]string { + if x != nil { + return x.ProxylibParams + } + return nil +} + +func (x *NetworkFilter) GetAccessLogPath() string { + if x != nil { + return x.AccessLogPath + } + return "" +} + +var File_cilium_api_network_filter_proto protoreflect.FileDescriptor + +var file_cilium_api_network_filter_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x06, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x22, 0xea, 0x01, 0x0a, 0x0d, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x6c, 0x69, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x6c, 0x69, 0x62, 0x12, 0x52, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x6c, 0x69, 0x62, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x6c, 0x69, 0x62, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x6c, 0x69, 0x62, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x50, + 0x61, 0x74, 0x68, 0x1a, 0x41, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x6c, 0x69, 0x62, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, + 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cilium_api_network_filter_proto_rawDescOnce sync.Once + file_cilium_api_network_filter_proto_rawDescData = file_cilium_api_network_filter_proto_rawDesc +) + +func file_cilium_api_network_filter_proto_rawDescGZIP() []byte { + file_cilium_api_network_filter_proto_rawDescOnce.Do(func() { + file_cilium_api_network_filter_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_network_filter_proto_rawDescData) + }) + return file_cilium_api_network_filter_proto_rawDescData +} + +var file_cilium_api_network_filter_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_cilium_api_network_filter_proto_goTypes = []interface{}{ + (*NetworkFilter)(nil), // 0: cilium.NetworkFilter + nil, // 1: cilium.NetworkFilter.ProxylibParamsEntry +} +var file_cilium_api_network_filter_proto_depIdxs = []int32{ + 1, // 0: cilium.NetworkFilter.proxylib_params:type_name -> 
cilium.NetworkFilter.ProxylibParamsEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_cilium_api_network_filter_proto_init() } +func file_cilium_api_network_filter_proto_init() { + if File_cilium_api_network_filter_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cilium_api_network_filter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cilium_api_network_filter_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cilium_api_network_filter_proto_goTypes, + DependencyIndexes: file_cilium_api_network_filter_proto_depIdxs, + MessageInfos: file_cilium_api_network_filter_proto_msgTypes, + }.Build() + File_cilium_api_network_filter_proto = out.File + file_cilium_api_network_filter_proto_rawDesc = nil + file_cilium_api_network_filter_proto_goTypes = nil + file_cilium_api_network_filter_proto_depIdxs = nil +} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.validate.go new file mode 100644 index 0000000000..fec4b8b719 --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.validate.go @@ -0,0 +1,142 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: cilium/api/network_filter.proto + +package cilium + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on NetworkFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *NetworkFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on NetworkFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in NetworkFilterMultiError, or +// nil if none found. 
+func (m *NetworkFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *NetworkFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Proxylib + + // no validation rules for ProxylibParams + + // no validation rules for AccessLogPath + + if len(errors) > 0 { + return NetworkFilterMultiError(errors) + } + + return nil +} + +// NetworkFilterMultiError is an error wrapping multiple validation errors +// returned by NetworkFilter.ValidateAll() if the designated constraints +// aren't met. +type NetworkFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m NetworkFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NetworkFilterMultiError) AllErrors() []error { return m } + +// NetworkFilterValidationError is the validation error returned by +// NetworkFilter.Validate if the designated constraints aren't met. +type NetworkFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NetworkFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e NetworkFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NetworkFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NetworkFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e NetworkFilterValidationError) ErrorName() string { return "NetworkFilterValidationError" } + +// Error satisfies the builtin error interface +func (e NetworkFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNetworkFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NetworkFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NetworkFilterValidationError{} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.go new file mode 100644 index 0000000000..fc72a8019b --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.go @@ -0,0 +1,1772 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.23.0 +// protoc v4.23.1 +// source: cilium/api/npds.proto + +package cilium + +import ( + context "context" + _ "github.com/cilium/proxy/go/envoy/annotations" + v3 "github.com/cilium/proxy/go/envoy/config/core/v3" + v31 "github.com/cilium/proxy/go/envoy/config/route/v3" + v33 "github.com/cilium/proxy/go/envoy/service/discovery/v3" + v32 "github.com/cilium/proxy/go/envoy/type/matcher/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + proto "github.com/golang/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// Action specifies what to do when the header matches. +type HeaderMatch_MatchAction int32 + +const ( + HeaderMatch_CONTINUE_ON_MATCH HeaderMatch_MatchAction = 0 // Keep checking other matches (default) + HeaderMatch_FAIL_ON_MATCH HeaderMatch_MatchAction = 1 // Drop the request if no other rule matches + HeaderMatch_DELETE_ON_MATCH HeaderMatch_MatchAction = 2 // Remove the whole matching header +) + +// Enum value maps for HeaderMatch_MatchAction. +var ( + HeaderMatch_MatchAction_name = map[int32]string{ + 0: "CONTINUE_ON_MATCH", + 1: "FAIL_ON_MATCH", + 2: "DELETE_ON_MATCH", + } + HeaderMatch_MatchAction_value = map[string]int32{ + "CONTINUE_ON_MATCH": 0, + "FAIL_ON_MATCH": 1, + "DELETE_ON_MATCH": 2, + } +) + +func (x HeaderMatch_MatchAction) Enum() *HeaderMatch_MatchAction { + p := new(HeaderMatch_MatchAction) + *p = x + return p +} + +func (x HeaderMatch_MatchAction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HeaderMatch_MatchAction) Descriptor() protoreflect.EnumDescriptor { + return file_cilium_api_npds_proto_enumTypes[0].Descriptor() +} + +func (HeaderMatch_MatchAction) Type() protoreflect.EnumType { + return &file_cilium_api_npds_proto_enumTypes[0] +} + +func (x HeaderMatch_MatchAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HeaderMatch_MatchAction.Descriptor instead. +func (HeaderMatch_MatchAction) EnumDescriptor() ([]byte, []int) { + return file_cilium_api_npds_proto_rawDescGZIP(), []int{5, 0} +} + +type HeaderMatch_MismatchAction int32 + +const ( + HeaderMatch_FAIL_ON_MISMATCH HeaderMatch_MismatchAction = 0 // Drop the request if no other rule matches (default) + HeaderMatch_CONTINUE_ON_MISMATCH HeaderMatch_MismatchAction = 1 // Keep checking other matches, log the mismatch + HeaderMatch_ADD_ON_MISMATCH HeaderMatch_MismatchAction = 2 // Add 'value' to the multivalued header + HeaderMatch_DELETE_ON_MISMATCH HeaderMatch_MismatchAction = 3 // Remove the whole mismatching header + HeaderMatch_REPLACE_ON_MISMATCH HeaderMatch_MismatchAction = 4 // Replace the whole mismatching header with 'value' +) + +// Enum value maps for HeaderMatch_MismatchAction. 
+var ( + HeaderMatch_MismatchAction_name = map[int32]string{ + 0: "FAIL_ON_MISMATCH", + 1: "CONTINUE_ON_MISMATCH", + 2: "ADD_ON_MISMATCH", + 3: "DELETE_ON_MISMATCH", + 4: "REPLACE_ON_MISMATCH", + } + HeaderMatch_MismatchAction_value = map[string]int32{ + "FAIL_ON_MISMATCH": 0, + "CONTINUE_ON_MISMATCH": 1, + "ADD_ON_MISMATCH": 2, + "DELETE_ON_MISMATCH": 3, + "REPLACE_ON_MISMATCH": 4, + } +) + +func (x HeaderMatch_MismatchAction) Enum() *HeaderMatch_MismatchAction { + p := new(HeaderMatch_MismatchAction) + *p = x + return p +} + +func (x HeaderMatch_MismatchAction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HeaderMatch_MismatchAction) Descriptor() protoreflect.EnumDescriptor { + return file_cilium_api_npds_proto_enumTypes[1].Descriptor() +} + +func (HeaderMatch_MismatchAction) Type() protoreflect.EnumType { + return &file_cilium_api_npds_proto_enumTypes[1] +} + +func (x HeaderMatch_MismatchAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HeaderMatch_MismatchAction.Descriptor instead. +func (HeaderMatch_MismatchAction) EnumDescriptor() ([]byte, []int) { + return file_cilium_api_npds_proto_rawDescGZIP(), []int{5, 1} +} + +// A network policy that is enforced by a filter on the network flows to/from +// associated hosts. +type NetworkPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // IPs of the endpoint to which this policy applies. + // Required. + EndpointIps []string `protobuf:"bytes,1,rep,name=endpoint_ips,json=endpointIps,proto3" json:"endpoint_ips,omitempty"` + // The endpoint identifier associated with the network policy. + // Required. + EndpointId uint64 `protobuf:"varint,2,opt,name=endpoint_id,json=endpointId,proto3" json:"endpoint_id,omitempty"` + // The part of the policy to be enforced at ingress by the filter, as a set + // of per-port network policies, one per destination L4 port. + // Every PortNetworkPolicy element in this set has a unique port / protocol + // combination. + // Optional. If empty, all flows in this direction are denied. + IngressPerPortPolicies []*PortNetworkPolicy `protobuf:"bytes,3,rep,name=ingress_per_port_policies,json=ingressPerPortPolicies,proto3" json:"ingress_per_port_policies,omitempty"` + // The part of the policy to be enforced at egress by the filter, as a set + // of per-port network policies, one per destination L4 port. + // Every PortNetworkPolicy element in this set has a unique port / protocol + // combination. + // Optional. If empty, all flows in this direction are denied. + EgressPerPortPolicies []*PortNetworkPolicy `protobuf:"bytes,4,rep,name=egress_per_port_policies,json=egressPerPortPolicies,proto3" json:"egress_per_port_policies,omitempty"` + // Name of the conntrack map to use with this policy. + // The paths to various Cilium conntrack maps are derived using this name. + // Optional. If empty, ipcache or hostmap lookup is used instead of conntrack + // map. 
+ ConntrackMapName string `protobuf:"bytes,5,opt,name=conntrack_map_name,json=conntrackMapName,proto3" json:"conntrack_map_name,omitempty"` +} + +func (x *NetworkPolicy) Reset() { + *x = NetworkPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_npds_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkPolicy) ProtoMessage() {} + +func (x *NetworkPolicy) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_npds_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkPolicy.ProtoReflect.Descriptor instead. +func (*NetworkPolicy) Descriptor() ([]byte, []int) { + return file_cilium_api_npds_proto_rawDescGZIP(), []int{0} +} + +func (x *NetworkPolicy) GetEndpointIps() []string { + if x != nil { + return x.EndpointIps + } + return nil +} + +func (x *NetworkPolicy) GetEndpointId() uint64 { + if x != nil { + return x.EndpointId + } + return 0 +} + +func (x *NetworkPolicy) GetIngressPerPortPolicies() []*PortNetworkPolicy { + if x != nil { + return x.IngressPerPortPolicies + } + return nil +} + +func (x *NetworkPolicy) GetEgressPerPortPolicies() []*PortNetworkPolicy { + if x != nil { + return x.EgressPerPortPolicies + } + return nil +} + +func (x *NetworkPolicy) GetConntrackMapName() string { + if x != nil { + return x.ConntrackMapName + } + return "" +} + +// A network policy to whitelist flows to a specific destination L4 port, +// as a conjunction of predicates on L3/L4/L7 flows. +// If all the predicates of a policy match a flow, the flow is whitelisted. +type PortNetworkPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The flows' destination L4 port number, as an unsigned 16-bit integer. + // If 0, all destination L4 port numbers are matched by this predicate. + Port uint32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"` + // The end of the destination port range, if non-zero. + EndPort uint32 `protobuf:"varint,4,opt,name=end_port,json=endPort,proto3" json:"end_port,omitempty"` + // The flows' L4 transport protocol. + // Required. + Protocol v3.SocketAddress_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=envoy.config.core.v3.SocketAddress_Protocol" json:"protocol,omitempty"` + // The network policy rules to be enforced on the flows to the port. + // Optional. A flow is matched by this predicate if either the set of + // rules is empty or any of the rules matches it. 
+	Rules []*PortNetworkPolicyRule `protobuf:"bytes,3,rep,name=rules,proto3" json:"rules,omitempty"`
+}
+
+func (x *PortNetworkPolicy) Reset() {
+	*x = PortNetworkPolicy{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_cilium_api_npds_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PortNetworkPolicy) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PortNetworkPolicy) ProtoMessage() {}
+
+func (x *PortNetworkPolicy) ProtoReflect() protoreflect.Message {
+	mi := &file_cilium_api_npds_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PortNetworkPolicy.ProtoReflect.Descriptor instead.
+func (*PortNetworkPolicy) Descriptor() ([]byte, []int) {
+	return file_cilium_api_npds_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *PortNetworkPolicy) GetPort() uint32 {
+	if x != nil {
+		return x.Port
+	}
+	return 0
+}
+
+func (x *PortNetworkPolicy) GetEndPort() uint32 {
+	if x != nil {
+		return x.EndPort
+	}
+	return 0
+}
+
+func (x *PortNetworkPolicy) GetProtocol() v3.SocketAddress_Protocol {
+	if x != nil {
+		return x.Protocol
+	}
+	return v3.SocketAddress_TCP
+}
+
+func (x *PortNetworkPolicy) GetRules() []*PortNetworkPolicyRule {
+	if x != nil {
+		return x.Rules
+	}
+	return nil
+}
+
+type TLSContext struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// CA certificates. If present, the counterparty must provide a valid
+	// certificate.
+	// Deprecated, use 'validation_context_sds_secret' instead.
+	TrustedCa string `protobuf:"bytes,1,opt,name=trusted_ca,json=trustedCa,proto3" json:"trusted_ca,omitempty"`
+	// Certificate chain.
+	// Deprecated, use 'tls_sds_secret' instead.
+	CertificateChain string `protobuf:"bytes,2,opt,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"`
+	// Private key.
+	// Deprecated, use 'tls_sds_secret' instead.
+	PrivateKey string `protobuf:"bytes,3,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"`
+	// Server Name Indication (SNI). For downstream this helps choose the certificate to
+	// present to the client. For upstream this will be used as the SNI on the
+	// client connection.
+	ServerNames []string `protobuf:"bytes,4,rep,name=server_names,json=serverNames,proto3" json:"server_names,omitempty"`
+	// Name of an SDS secret for CA certificates. Secret is fetched from the same gRPC source as
+	// this Network Policy. If present, the counterparty must provide a valid certificate.
+	// May not be used at the same time as 'trusted_ca'.
+	ValidationContextSdsSecret string `protobuf:"bytes,5,opt,name=validation_context_sds_secret,json=validationContextSdsSecret,proto3" json:"validation_context_sds_secret,omitempty"`
+	// Name of an SDS secret for both TLS private key and certificate chain. Secret is fetched
+	// from the same gRPC source as this Network Policy.
+	// May not be used at the same time as 'certificate_chain' or 'private_key'.
+ TlsSdsSecret string `protobuf:"bytes,6,opt,name=tls_sds_secret,json=tlsSdsSecret,proto3" json:"tls_sds_secret,omitempty"` +} + +func (x *TLSContext) Reset() { + *x = TLSContext{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_npds_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TLSContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TLSContext) ProtoMessage() {} + +func (x *TLSContext) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_npds_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TLSContext.ProtoReflect.Descriptor instead. +func (*TLSContext) Descriptor() ([]byte, []int) { + return file_cilium_api_npds_proto_rawDescGZIP(), []int{2} +} + +func (x *TLSContext) GetTrustedCa() string { + if x != nil { + return x.TrustedCa + } + return "" +} + +func (x *TLSContext) GetCertificateChain() string { + if x != nil { + return x.CertificateChain + } + return "" +} + +func (x *TLSContext) GetPrivateKey() string { + if x != nil { + return x.PrivateKey + } + return "" +} + +func (x *TLSContext) GetServerNames() []string { + if x != nil { + return x.ServerNames + } + return nil +} + +func (x *TLSContext) GetValidationContextSdsSecret() string { + if x != nil { + return x.ValidationContextSdsSecret + } + return "" +} + +func (x *TLSContext) GetTlsSdsSecret() string { + if x != nil { + return x.TlsSdsSecret + } + return "" +} + +// A network policy rule, as a conjunction of predicates on L3/L7 flows. +// If all the predicates of a rule match a flow, the flow is matched by the +// rule. +type PortNetworkPolicyRule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Traffic on this port is denied for all `remote_policies` if true + Deny bool `protobuf:"varint,8,opt,name=deny,proto3" json:"deny,omitempty"` + // Optional name for the rule, can be used in logging and error messages. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // The set of numeric remote security IDs explicitly allowed or denied. + // A flow is matched by this predicate if the identifier of the policy + // applied on the flow's remote host is contained in this set. + // Optional. If not specified, any remote host is matched by this predicate. + // This field is deprecated, use remote_policies instead. + // TODO: Remove when Cilium 1.14 no longer supported. + DeprecatedRemotePolicies_64 []uint64 `protobuf:"varint,1,rep,packed,name=deprecated_remote_policies_64,json=deprecatedRemotePolicies64,proto3" json:"deprecated_remote_policies_64,omitempty"` + RemotePolicies []uint32 `protobuf:"varint,7,rep,packed,name=remote_policies,json=remotePolicies,proto3" json:"remote_policies,omitempty"` + // Optional downstream TLS context. If present, the incoming connection must + // be a TLS connection. + DownstreamTlsContext *TLSContext `protobuf:"bytes,3,opt,name=downstream_tls_context,json=downstreamTlsContext,proto3" json:"downstream_tls_context,omitempty"` + // Optional upstream TLS context. If present, the outgoing connection will use + // TLS. 
+	UpstreamTlsContext *TLSContext `protobuf:"bytes,4,opt,name=upstream_tls_context,json=upstreamTlsContext,proto3" json:"upstream_tls_context,omitempty"`
+	// Optional allowed SNIs in TLS handshake.
+	ServerNames []string `protobuf:"bytes,6,rep,name=server_names,json=serverNames,proto3" json:"server_names,omitempty"`
+	// Optional L7 protocol parser name. This is only used if the parser is not
+	// one of the well-known ones. If specified, the l7 parser having this name
+	// needs to be built into libcilium.so.
+	L7Proto string `protobuf:"bytes,2,opt,name=l7_proto,json=l7Proto,proto3" json:"l7_proto,omitempty"`
+	// Optional. If not specified, any L7 request is matched by this predicate.
+	// All rules on any given port must have the same type of L7 rules!
+	//
+	// Types that are assignable to L7:
+	//
+	//	*PortNetworkPolicyRule_HttpRules
+	//	*PortNetworkPolicyRule_KafkaRules
+	//	*PortNetworkPolicyRule_L7Rules
+	L7 isPortNetworkPolicyRule_L7 `protobuf_oneof:"l7"`
+}
+
+func (x *PortNetworkPolicyRule) Reset() {
+	*x = PortNetworkPolicyRule{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_cilium_api_npds_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PortNetworkPolicyRule) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PortNetworkPolicyRule) ProtoMessage() {}
+
+func (x *PortNetworkPolicyRule) ProtoReflect() protoreflect.Message {
+	mi := &file_cilium_api_npds_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
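The L7 oneof above is populated by assigning one of the wrapper types declared
further down in this file. A minimal sketch of how a consumer of this vendored
package might build such a rule; the rule name and remote identity number are
illustrative, not taken from this patch:

	func examplePortRule() *PortNetworkPolicyRule {
		return &PortNetworkPolicyRule{
			Name:           "allow-http",   // illustrative rule name
			RemotePolicies: []uint32{1042}, // illustrative remote identity
			L7: &PortNetworkPolicyRule_HttpRules{
				HttpRules: &HttpNetworkPolicyRules{
					// One empty HttpNetworkPolicyRule matches any HTTP request
					// while keeping http_rules non-empty, as its doc requires.
					HttpRules: []*HttpNetworkPolicyRule{{}},
				},
			},
		}
	}

+
+// Deprecated: Use PortNetworkPolicyRule.ProtoReflect.Descriptor instead.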
+func (*PortNetworkPolicyRule) Descriptor() ([]byte, []int) { + return file_cilium_api_npds_proto_rawDescGZIP(), []int{3} +} + +func (x *PortNetworkPolicyRule) GetDeny() bool { + if x != nil { + return x.Deny + } + return false +} + +func (x *PortNetworkPolicyRule) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *PortNetworkPolicyRule) GetDeprecatedRemotePolicies_64() []uint64 { + if x != nil { + return x.DeprecatedRemotePolicies_64 + } + return nil +} + +func (x *PortNetworkPolicyRule) GetRemotePolicies() []uint32 { + if x != nil { + return x.RemotePolicies + } + return nil +} + +func (x *PortNetworkPolicyRule) GetDownstreamTlsContext() *TLSContext { + if x != nil { + return x.DownstreamTlsContext + } + return nil +} + +func (x *PortNetworkPolicyRule) GetUpstreamTlsContext() *TLSContext { + if x != nil { + return x.UpstreamTlsContext + } + return nil +} + +func (x *PortNetworkPolicyRule) GetServerNames() []string { + if x != nil { + return x.ServerNames + } + return nil +} + +func (x *PortNetworkPolicyRule) GetL7Proto() string { + if x != nil { + return x.L7Proto + } + return "" +} + +func (m *PortNetworkPolicyRule) GetL7() isPortNetworkPolicyRule_L7 { + if m != nil { + return m.L7 + } + return nil +} + +func (x *PortNetworkPolicyRule) GetHttpRules() *HttpNetworkPolicyRules { + if x, ok := x.GetL7().(*PortNetworkPolicyRule_HttpRules); ok { + return x.HttpRules + } + return nil +} + +func (x *PortNetworkPolicyRule) GetKafkaRules() *KafkaNetworkPolicyRules { + if x, ok := x.GetL7().(*PortNetworkPolicyRule_KafkaRules); ok { + return x.KafkaRules + } + return nil +} + +func (x *PortNetworkPolicyRule) GetL7Rules() *L7NetworkPolicyRules { + if x, ok := x.GetL7().(*PortNetworkPolicyRule_L7Rules); ok { + return x.L7Rules + } + return nil +} + +type isPortNetworkPolicyRule_L7 interface { + isPortNetworkPolicyRule_L7() +} + +type PortNetworkPolicyRule_HttpRules struct { + // The set of HTTP network policy rules. + // An HTTP request is matched by this predicate if any of its rules matches + // the request. + HttpRules *HttpNetworkPolicyRules `protobuf:"bytes,100,opt,name=http_rules,json=httpRules,proto3,oneof"` +} + +type PortNetworkPolicyRule_KafkaRules struct { + // The set of Kafka network policy rules. + // A Kafka request is matched by this predicate if any of its rules matches + // the request. + KafkaRules *KafkaNetworkPolicyRules `protobuf:"bytes,101,opt,name=kafka_rules,json=kafkaRules,proto3,oneof"` +} + +type PortNetworkPolicyRule_L7Rules struct { + // Set of Generic policy rules used when 'l7_proto' is defined. + // Only to be used for l7 protocols for which a specific oneof + // is not defined + L7Rules *L7NetworkPolicyRules `protobuf:"bytes,102,opt,name=l7_rules,json=l7Rules,proto3,oneof"` +} + +func (*PortNetworkPolicyRule_HttpRules) isPortNetworkPolicyRule_L7() {} + +func (*PortNetworkPolicyRule_KafkaRules) isPortNetworkPolicyRule_L7() {} + +func (*PortNetworkPolicyRule_L7Rules) isPortNetworkPolicyRule_L7() {} + +// A set of network policy rules that match HTTP requests. +type HttpNetworkPolicyRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of HTTP network policy rules. + // An HTTP request is matched if any of its rules matches the request. + // Required and may not be empty. 
+ HttpRules []*HttpNetworkPolicyRule `protobuf:"bytes,1,rep,name=http_rules,json=httpRules,proto3" json:"http_rules,omitempty"` +} + +func (x *HttpNetworkPolicyRules) Reset() { + *x = HttpNetworkPolicyRules{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_npds_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpNetworkPolicyRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpNetworkPolicyRules) ProtoMessage() {} + +func (x *HttpNetworkPolicyRules) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_npds_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpNetworkPolicyRules.ProtoReflect.Descriptor instead. +func (*HttpNetworkPolicyRules) Descriptor() ([]byte, []int) { + return file_cilium_api_npds_proto_rawDescGZIP(), []int{4} +} + +func (x *HttpNetworkPolicyRules) GetHttpRules() []*HttpNetworkPolicyRule { + if x != nil { + return x.HttpRules + } + return nil +} + +type HeaderMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` // empty for presence match. For secret data use 'value_sds_secret' instead. + MatchAction HeaderMatch_MatchAction `protobuf:"varint,3,opt,name=match_action,json=matchAction,proto3,enum=cilium.HeaderMatch_MatchAction" json:"match_action,omitempty"` + MismatchAction HeaderMatch_MismatchAction `protobuf:"varint,4,opt,name=mismatch_action,json=mismatchAction,proto3,enum=cilium.HeaderMatch_MismatchAction" json:"mismatch_action,omitempty"` + // Generic secret name for fetching value via SDS. Secret is fetched from the same gRPC source as + // this Network Policy. + ValueSdsSecret string `protobuf:"bytes,5,opt,name=value_sds_secret,json=valueSdsSecret,proto3" json:"value_sds_secret,omitempty"` +} + +func (x *HeaderMatch) Reset() { + *x = HeaderMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_npds_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderMatch) ProtoMessage() {} + +func (x *HeaderMatch) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_npds_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderMatch.ProtoReflect.Descriptor instead. 
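A minimal sketch of constructing a HeaderMatch with this package; the header
name and value are illustrative, and the two enum constants used are the
zero-value defaults returned by the getters below:

	func exampleHeaderMatch() *HeaderMatch {
		return &HeaderMatch{
			Name:           "x-team",   // illustrative header name
			Value:          "platform", // an empty Value would make this a presence match
			MatchAction:    HeaderMatch_CONTINUE_ON_MATCH,
			MismatchAction: HeaderMatch_FAIL_ON_MISMATCH,
		}
	}
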
+func (*HeaderMatch) Descriptor() ([]byte, []int) { + return file_cilium_api_npds_proto_rawDescGZIP(), []int{5} +} + +func (x *HeaderMatch) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *HeaderMatch) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *HeaderMatch) GetMatchAction() HeaderMatch_MatchAction { + if x != nil { + return x.MatchAction + } + return HeaderMatch_CONTINUE_ON_MATCH +} + +func (x *HeaderMatch) GetMismatchAction() HeaderMatch_MismatchAction { + if x != nil { + return x.MismatchAction + } + return HeaderMatch_FAIL_ON_MISMATCH +} + +func (x *HeaderMatch) GetValueSdsSecret() string { + if x != nil { + return x.ValueSdsSecret + } + return "" +} + +// An HTTP network policy rule, as a conjunction of predicates on HTTP requests. +// If all the predicates of a rule match an HTTP request, the request is +// allowed. Otherwise, it is denied. +type HttpNetworkPolicyRule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A set of matchers on the HTTP request's headers' names and values. + // If all the matchers in this set match an HTTP request, the request is + // allowed by this rule. Otherwise, it is denied. + // + // Some special header names are: + // + // * *:uri*: The HTTP request's URI. + // * *:method*: The HTTP request's method. + // * *:authority*: Also maps to the HTTP 1.1 *Host* header. + // + // Optional. If empty, matches any HTTP request. + Headers []*v31.HeaderMatcher `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` + // header_matches is a set of HTTP header name and value pairs that + // will be matched against the request headers, if all the other match + // requirements in 'headers' are met. Each HeaderAction determines what to do + // when there is a match or mismatch. + // + // Optional. + HeaderMatches []*HeaderMatch `protobuf:"bytes,2,rep,name=header_matches,json=headerMatches,proto3" json:"header_matches,omitempty"` +} + +func (x *HttpNetworkPolicyRule) Reset() { + *x = HttpNetworkPolicyRule{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_npds_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpNetworkPolicyRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpNetworkPolicyRule) ProtoMessage() {} + +func (x *HttpNetworkPolicyRule) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_npds_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpNetworkPolicyRule.ProtoReflect.Descriptor instead. +func (*HttpNetworkPolicyRule) Descriptor() ([]byte, []int) { + return file_cilium_api_npds_proto_rawDescGZIP(), []int{6} +} + +func (x *HttpNetworkPolicyRule) GetHeaders() []*v31.HeaderMatcher { + if x != nil { + return x.Headers + } + return nil +} + +func (x *HttpNetworkPolicyRule) GetHeaderMatches() []*HeaderMatch { + if x != nil { + return x.HeaderMatches + } + return nil +} + +// A set of network policy rules that match Kafka requests. +type KafkaNetworkPolicyRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of Kafka network policy rules. + // A Kafka request is matched if any of its rules matches the request. 
+	// Required and may not be empty.
+	KafkaRules []*KafkaNetworkPolicyRule `protobuf:"bytes,1,rep,name=kafka_rules,json=kafkaRules,proto3" json:"kafka_rules,omitempty"`
+}
+
+func (x *KafkaNetworkPolicyRules) Reset() {
+	*x = KafkaNetworkPolicyRules{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_cilium_api_npds_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *KafkaNetworkPolicyRules) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KafkaNetworkPolicyRules) ProtoMessage() {}
+
+func (x *KafkaNetworkPolicyRules) ProtoReflect() protoreflect.Message {
+	mi := &file_cilium_api_npds_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use KafkaNetworkPolicyRules.ProtoReflect.Descriptor instead.
+func (*KafkaNetworkPolicyRules) Descriptor() ([]byte, []int) {
+	return file_cilium_api_npds_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *KafkaNetworkPolicyRules) GetKafkaRules() []*KafkaNetworkPolicyRule {
+	if x != nil {
+		return x.KafkaRules
+	}
+	return nil
+}
+
+// A Kafka network policy rule, as a conjunction of predicates on Kafka
+// requests. If all the predicates of a rule match a Kafka request, the request
+// is allowed. Otherwise, it is denied.
+type KafkaNetworkPolicyRule struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The Kafka request's API version.
+	// If < 0, all Kafka requests are matched by this predicate.
+	ApiVersion int32 `protobuf:"varint,1,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"`
+	// Set of allowed API keys in the Kafka request.
+	// If none, all Kafka requests are matched by this predicate.
+	ApiKeys []int32 `protobuf:"varint,2,rep,packed,name=api_keys,json=apiKeys,proto3" json:"api_keys,omitempty"`
+	// The Kafka request's client ID.
+	// Optional. If not specified, all Kafka requests are matched by this
+	// predicate. If specified, this predicate only matches requests that contain
+	// this client ID, and never matches requests that don't contain any client
+	// ID.
+	ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
+	// The Kafka request's topic.
+	// Optional. If not specified, this rule will not consider the Kafka request's
+	// topics. If specified, this predicate only matches requests that contain
+	// this topic, and never matches requests that don't contain any topic.
+	// However, messages that cannot contain a topic will also be matched.
+ Topic string `protobuf:"bytes,4,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *KafkaNetworkPolicyRule) Reset() { + *x = KafkaNetworkPolicyRule{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_npds_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KafkaNetworkPolicyRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KafkaNetworkPolicyRule) ProtoMessage() {} + +func (x *KafkaNetworkPolicyRule) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_npds_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KafkaNetworkPolicyRule.ProtoReflect.Descriptor instead. +func (*KafkaNetworkPolicyRule) Descriptor() ([]byte, []int) { + return file_cilium_api_npds_proto_rawDescGZIP(), []int{8} +} + +func (x *KafkaNetworkPolicyRule) GetApiVersion() int32 { + if x != nil { + return x.ApiVersion + } + return 0 +} + +func (x *KafkaNetworkPolicyRule) GetApiKeys() []int32 { + if x != nil { + return x.ApiKeys + } + return nil +} + +func (x *KafkaNetworkPolicyRule) GetClientId() string { + if x != nil { + return x.ClientId + } + return "" +} + +func (x *KafkaNetworkPolicyRule) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +// A set of network policy rules that match generic L7 requests. +type L7NetworkPolicyRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of allowing l7 policy rules. + // A request is allowed if any of these rules matches the request, + // and the request does not match any of the deny rules. + // Optional. If missing or empty then all requests are allowed, unless + // denied by a deny rule. + L7AllowRules []*L7NetworkPolicyRule `protobuf:"bytes,1,rep,name=l7_allow_rules,json=l7AllowRules,proto3" json:"l7_allow_rules,omitempty"` + // The set of denying l7 policy rules. + // A request is denied if any of these rules matches the request. + // A request that is not denied may be allowed by 'l7_allow_rules'. + // Optional. + L7DenyRules []*L7NetworkPolicyRule `protobuf:"bytes,2,rep,name=l7_deny_rules,json=l7DenyRules,proto3" json:"l7_deny_rules,omitempty"` +} + +func (x *L7NetworkPolicyRules) Reset() { + *x = L7NetworkPolicyRules{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_npds_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *L7NetworkPolicyRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*L7NetworkPolicyRules) ProtoMessage() {} + +func (x *L7NetworkPolicyRules) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_npds_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use L7NetworkPolicyRules.ProtoReflect.Descriptor instead. 
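A minimal sketch of generic L7 rules as a consumer of this package might
construct them; the rule name and the key/value pair are illustrative, and
their meaning is defined by whichever parser 'l7_proto' names, not by this
file:

	func exampleL7Rules() *L7NetworkPolicyRules {
		return &L7NetworkPolicyRules{
			// A request matching any deny rule is denied; otherwise it is
			// allowed if any of these allow rules matches it.
			L7AllowRules: []*L7NetworkPolicyRule{{
				Name: "allow-read",                     // illustrative rule name
				Rule: map[string]string{"cmd": "READ"}, // parser-specific key/value
			}},
		}
	}
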
+func (*L7NetworkPolicyRules) Descriptor() ([]byte, []int) { + return file_cilium_api_npds_proto_rawDescGZIP(), []int{9} +} + +func (x *L7NetworkPolicyRules) GetL7AllowRules() []*L7NetworkPolicyRule { + if x != nil { + return x.L7AllowRules + } + return nil +} + +func (x *L7NetworkPolicyRules) GetL7DenyRules() []*L7NetworkPolicyRule { + if x != nil { + return x.L7DenyRules + } + return nil +} + +// A generic L7 policy rule, as a conjunction of predicates on l7 requests. +// If all the predicates of a rule match a request, the request is allowed. +// Otherwise, it is denied. +type L7NetworkPolicyRule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional rule name, can be used in logging and error messages. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Generic rule for Go extensions. + // Optional. If empty, matches any request. Not allowed if 'metadata_rule' is + // present. + Rule map[string]string `protobuf:"bytes,1,rep,name=rule,proto3" json:"rule,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Generic rule for Envoy metadata enforcement. All matchers must match for + // the rule to allow the request/connection. Optional. If empty, matches any + // request. Not allowed if 'rule' is present. + MetadataRule []*v32.MetadataMatcher `protobuf:"bytes,2,rep,name=metadata_rule,json=metadataRule,proto3" json:"metadata_rule,omitempty"` +} + +func (x *L7NetworkPolicyRule) Reset() { + *x = L7NetworkPolicyRule{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_npds_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *L7NetworkPolicyRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*L7NetworkPolicyRule) ProtoMessage() {} + +func (x *L7NetworkPolicyRule) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_npds_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use L7NetworkPolicyRule.ProtoReflect.Descriptor instead. +func (*L7NetworkPolicyRule) Descriptor() ([]byte, []int) { + return file_cilium_api_npds_proto_rawDescGZIP(), []int{10} +} + +func (x *L7NetworkPolicyRule) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *L7NetworkPolicyRule) GetRule() map[string]string { + if x != nil { + return x.Rule + } + return nil +} + +func (x *L7NetworkPolicyRule) GetMetadataRule() []*v32.MetadataMatcher { + if x != nil { + return x.MetadataRule + } + return nil +} + +// Cilium's network policy manager fills this message with all currently known network policies. +type NetworkPoliciesConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The loaded networkpolicy configs. 
+ Networkpolicies []*NetworkPolicy `protobuf:"bytes,1,rep,name=networkpolicies,proto3" json:"networkpolicies,omitempty"` +} + +func (x *NetworkPoliciesConfigDump) Reset() { + *x = NetworkPoliciesConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_npds_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkPoliciesConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkPoliciesConfigDump) ProtoMessage() {} + +func (x *NetworkPoliciesConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_npds_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkPoliciesConfigDump.ProtoReflect.Descriptor instead. +func (*NetworkPoliciesConfigDump) Descriptor() ([]byte, []int) { + return file_cilium_api_npds_proto_rawDescGZIP(), []int{11} +} + +func (x *NetworkPoliciesConfigDump) GetNetworkpolicies() []*NetworkPolicy { + if x != nil { + return x.Networkpolicies + } + return nil +} + +var File_cilium_api_npds_proto protoreflect.FileDescriptor + +var file_cilium_api_npds_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6e, 0x70, 0x64, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x1a, + 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbd, 0x02, 0x0a, + 0x0d, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33, + 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x69, 0x70, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x08, 0x01, 0x10, 0x02, + 0x22, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x49, 0x70, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x64, 
0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x49, 0x64, 0x12, 0x54, 0x0a, 0x19, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, + 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x16, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x50, 0x65, 0x72, 0x50, 0x6f, + 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x18, 0x65, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, + 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, 0x65, 0x67, 0x72, 0x65, 0x73, 0x73, 0x50, + 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2c, + 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x6d, 0x61, 0x70, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x6e, + 0x74, 0x72, 0x61, 0x63, 0x6b, 0x4d, 0x61, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xd7, 0x01, 0x0a, + 0x11, 0x50, 0x6f, 0x72, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x04, 0x70, 0x6f, 0x72, + 0x74, 0x12, 0x24, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x07, + 0x65, 0x6e, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x48, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x12, 0x33, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x52, + 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x85, 0x02, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, + 0x5f, 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x72, 0x75, 0x73, 0x74, + 0x65, 0x64, 0x43, 0x61, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 
0x65, 0x4b, + 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x64, 0x73, 0x5f, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53, + 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x6c, 0x73, 0x5f, + 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x74, 0x6c, 0x73, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0xbf, + 0x04, 0x0a, 0x15, 0x50, 0x6f, 0x72, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x65, 0x6e, 0x79, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x65, 0x6e, 0x79, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x41, 0x0a, 0x1d, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x5f, 0x36, + 0x34, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x1a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x36, 0x34, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x16, + 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, + 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x52, 0x14, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x44, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x54, 0x4c, + 0x53, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, + 0x19, 0x0a, 0x08, 0x6c, 0x37, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6c, 0x37, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x3f, 0x0a, 0x0a, 0x68, 0x74, + 0x74, 0x70, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, + 0x52, 0x09, 
0x68, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0b, 0x6b, + 0x61, 0x66, 0x6b, 0x61, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x4e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x48, 0x00, 0x52, 0x0a, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, + 0x39, 0x0a, 0x08, 0x6c, 0x37, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x66, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4c, 0x37, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, + 0x00, 0x52, 0x07, 0x6c, 0x37, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x42, 0x04, 0x0a, 0x02, 0x6c, 0x37, + 0x22, 0x60, 0x0a, 0x16, 0x48, 0x74, 0x74, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x46, 0x0a, 0x0a, 0x68, 0x74, + 0x74, 0x70, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x09, 0x68, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, + 0x65, 0x73, 0x22, 0xd2, 0x03, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x63, 0x69, + 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x0f, 0x6d, 0x69, 0x73, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4d, 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x22, 0x4c, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x54, 0x49, 0x4e, 0x55, 0x45, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, + 0x41, 0x54, 0x43, 0x48, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x41, 0x49, 0x4c, 0x5f, 0x4f, + 0x4e, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x45, 0x4c, + 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x02, 0x22, 0x86, + 0x01, 0x0a, 0x0e, 0x4d, 0x69, 0x73, 
0x6d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x41, 0x49, 0x4c, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, + 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x4f, 0x4e, 0x54, 0x49, + 0x4e, 0x55, 0x45, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, + 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x44, 0x44, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x4d, + 0x41, 0x54, 0x43, 0x48, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, + 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x03, 0x12, 0x17, + 0x0a, 0x13, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, + 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x04, 0x22, 0x93, 0x01, 0x0a, 0x15, 0x48, 0x74, 0x74, 0x70, + 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, + 0x65, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x69, 0x6c, 0x69, + 0x75, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x0d, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x22, 0x64, 0x0a, + 0x17, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x0b, 0x6b, 0x61, 0x66, 0x6b, + 0x61, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0a, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x52, 0x75, + 0x6c, 0x65, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x16, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x19, 0x0a, 0x08, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x07, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x35, 0x0a, 0x09, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xfa, + 0x42, 0x15, 0x72, 0x13, 0x32, 0x11, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x30, 0x2d, + 0x39, 0x2e, 0x5f, 0x2d, 0x5d, 0x2a, 0x24, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, + 0x64, 0x12, 0x31, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x1b, 0xfa, 0x42, 0x18, 0x72, 0x16, 0x18, 0xff, 0x01, 0x32, 0x11, 0x5e, 0x5b, 0x61, 0x2d, + 0x7a, 0x41, 0x2d, 0x5a, 0x30, 0x2d, 0x39, 0x2e, 0x5f, 0x2d, 0x5d, 0x2a, 0x24, 0x52, 0x05, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x22, 0x9a, 0x01, 0x0a, 0x14, 0x4c, 
0x37, 0x4e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x41, 0x0a, + 0x0e, 0x6c, 0x37, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4c, + 0x37, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, + 0x6c, 0x65, 0x52, 0x0c, 0x6c, 0x37, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x3f, 0x0a, 0x0d, 0x6c, 0x37, 0x5f, 0x64, 0x65, 0x6e, 0x79, 0x5f, 0x72, 0x75, 0x6c, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, + 0x2e, 0x4c, 0x37, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0b, 0x6c, 0x37, 0x44, 0x65, 0x6e, 0x79, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x22, 0xea, 0x01, 0x0a, 0x13, 0x4c, 0x37, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, + 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x69, + 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4c, 0x37, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x12, 0x4b, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x52, 0x75, 0x6c, 0x65, 0x1a, 0x37, 0x0a, 0x09, 0x52, 0x75, 0x6c, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5c, + 0x0a, 0x19, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x3f, 0x0a, 0x0f, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x32, 0xda, 0x02, 0x0a, + 0x1d, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x44, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7a, + 0x0a, 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 
0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, + 0x76, 0x33, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x9e, 0x01, 0x0a, 0x14, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, + 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x3a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1c, 0x8a, 0xa4, 0x96, + 0xf3, 0x07, 0x16, 0x0a, 0x14, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, + 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cilium_api_npds_proto_rawDescOnce sync.Once + file_cilium_api_npds_proto_rawDescData = file_cilium_api_npds_proto_rawDesc +) + +func file_cilium_api_npds_proto_rawDescGZIP() []byte { + file_cilium_api_npds_proto_rawDescOnce.Do(func() { + file_cilium_api_npds_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_npds_proto_rawDescData) + }) + return file_cilium_api_npds_proto_rawDescData +} + +var file_cilium_api_npds_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_cilium_api_npds_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_cilium_api_npds_proto_goTypes = []interface{}{ + (HeaderMatch_MatchAction)(0), // 0: cilium.HeaderMatch.MatchAction + (HeaderMatch_MismatchAction)(0), // 1: cilium.HeaderMatch.MismatchAction + (*NetworkPolicy)(nil), // 2: cilium.NetworkPolicy + (*PortNetworkPolicy)(nil), // 3: cilium.PortNetworkPolicy + (*TLSContext)(nil), // 4: cilium.TLSContext + (*PortNetworkPolicyRule)(nil), // 5: cilium.PortNetworkPolicyRule + (*HttpNetworkPolicyRules)(nil), // 6: cilium.HttpNetworkPolicyRules + (*HeaderMatch)(nil), // 7: cilium.HeaderMatch + (*HttpNetworkPolicyRule)(nil), // 8: cilium.HttpNetworkPolicyRule + (*KafkaNetworkPolicyRules)(nil), // 9: cilium.KafkaNetworkPolicyRules + (*KafkaNetworkPolicyRule)(nil), // 10: cilium.KafkaNetworkPolicyRule + (*L7NetworkPolicyRules)(nil), // 11: cilium.L7NetworkPolicyRules + (*L7NetworkPolicyRule)(nil), // 12: cilium.L7NetworkPolicyRule + (*NetworkPoliciesConfigDump)(nil), // 13: cilium.NetworkPoliciesConfigDump + nil, // 14: cilium.L7NetworkPolicyRule.RuleEntry + (v3.SocketAddress_Protocol)(0), // 15: envoy.config.core.v3.SocketAddress.Protocol 
+ (*v31.HeaderMatcher)(nil), // 16: envoy.config.route.v3.HeaderMatcher + (*v32.MetadataMatcher)(nil), // 17: envoy.type.matcher.v3.MetadataMatcher + (*v33.DiscoveryRequest)(nil), // 18: envoy.service.discovery.v3.DiscoveryRequest + (*v33.DiscoveryResponse)(nil), // 19: envoy.service.discovery.v3.DiscoveryResponse +} +var file_cilium_api_npds_proto_depIdxs = []int32{ + 3, // 0: cilium.NetworkPolicy.ingress_per_port_policies:type_name -> cilium.PortNetworkPolicy + 3, // 1: cilium.NetworkPolicy.egress_per_port_policies:type_name -> cilium.PortNetworkPolicy + 15, // 2: cilium.PortNetworkPolicy.protocol:type_name -> envoy.config.core.v3.SocketAddress.Protocol + 5, // 3: cilium.PortNetworkPolicy.rules:type_name -> cilium.PortNetworkPolicyRule + 4, // 4: cilium.PortNetworkPolicyRule.downstream_tls_context:type_name -> cilium.TLSContext + 4, // 5: cilium.PortNetworkPolicyRule.upstream_tls_context:type_name -> cilium.TLSContext + 6, // 6: cilium.PortNetworkPolicyRule.http_rules:type_name -> cilium.HttpNetworkPolicyRules + 9, // 7: cilium.PortNetworkPolicyRule.kafka_rules:type_name -> cilium.KafkaNetworkPolicyRules + 11, // 8: cilium.PortNetworkPolicyRule.l7_rules:type_name -> cilium.L7NetworkPolicyRules + 8, // 9: cilium.HttpNetworkPolicyRules.http_rules:type_name -> cilium.HttpNetworkPolicyRule + 0, // 10: cilium.HeaderMatch.match_action:type_name -> cilium.HeaderMatch.MatchAction + 1, // 11: cilium.HeaderMatch.mismatch_action:type_name -> cilium.HeaderMatch.MismatchAction + 16, // 12: cilium.HttpNetworkPolicyRule.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 7, // 13: cilium.HttpNetworkPolicyRule.header_matches:type_name -> cilium.HeaderMatch + 10, // 14: cilium.KafkaNetworkPolicyRules.kafka_rules:type_name -> cilium.KafkaNetworkPolicyRule + 12, // 15: cilium.L7NetworkPolicyRules.l7_allow_rules:type_name -> cilium.L7NetworkPolicyRule + 12, // 16: cilium.L7NetworkPolicyRules.l7_deny_rules:type_name -> cilium.L7NetworkPolicyRule + 14, // 17: cilium.L7NetworkPolicyRule.rule:type_name -> cilium.L7NetworkPolicyRule.RuleEntry + 17, // 18: cilium.L7NetworkPolicyRule.metadata_rule:type_name -> envoy.type.matcher.v3.MetadataMatcher + 2, // 19: cilium.NetworkPoliciesConfigDump.networkpolicies:type_name -> cilium.NetworkPolicy + 18, // 20: cilium.NetworkPolicyDiscoveryService.StreamNetworkPolicies:input_type -> envoy.service.discovery.v3.DiscoveryRequest + 18, // 21: cilium.NetworkPolicyDiscoveryService.FetchNetworkPolicies:input_type -> envoy.service.discovery.v3.DiscoveryRequest + 19, // 22: cilium.NetworkPolicyDiscoveryService.StreamNetworkPolicies:output_type -> envoy.service.discovery.v3.DiscoveryResponse + 19, // 23: cilium.NetworkPolicyDiscoveryService.FetchNetworkPolicies:output_type -> envoy.service.discovery.v3.DiscoveryResponse + 22, // [22:24] is the sub-list for method output_type + 20, // [20:22] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name +} + +func init() { file_cilium_api_npds_proto_init() } +func file_cilium_api_npds_proto_init() { + if File_cilium_api_npds_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cilium_api_npds_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_npds_proto_msgTypes[1].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*PortNetworkPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_npds_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TLSContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_npds_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PortNetworkPolicyRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_npds_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpNetworkPolicyRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_npds_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_npds_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpNetworkPolicyRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_npds_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KafkaNetworkPolicyRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_npds_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KafkaNetworkPolicyRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_npds_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*L7NetworkPolicyRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_npds_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*L7NetworkPolicyRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_npds_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkPoliciesConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cilium_api_npds_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*PortNetworkPolicyRule_HttpRules)(nil), + (*PortNetworkPolicyRule_KafkaRules)(nil), + (*PortNetworkPolicyRule_L7Rules)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cilium_api_npds_proto_rawDesc, + NumEnums: 2, + NumMessages: 13, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_cilium_api_npds_proto_goTypes, + DependencyIndexes: file_cilium_api_npds_proto_depIdxs, + EnumInfos: file_cilium_api_npds_proto_enumTypes, + MessageInfos: file_cilium_api_npds_proto_msgTypes, + }.Build() 
+ File_cilium_api_npds_proto = out.File + file_cilium_api_npds_proto_rawDesc = nil + file_cilium_api_npds_proto_goTypes = nil + file_cilium_api_npds_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// NetworkPolicyDiscoveryServiceClient is the client API for NetworkPolicyDiscoveryService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type NetworkPolicyDiscoveryServiceClient interface { + StreamNetworkPolicies(ctx context.Context, opts ...grpc.CallOption) (NetworkPolicyDiscoveryService_StreamNetworkPoliciesClient, error) + FetchNetworkPolicies(ctx context.Context, in *v33.DiscoveryRequest, opts ...grpc.CallOption) (*v33.DiscoveryResponse, error) +} + +type networkPolicyDiscoveryServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewNetworkPolicyDiscoveryServiceClient(cc grpc.ClientConnInterface) NetworkPolicyDiscoveryServiceClient { + return &networkPolicyDiscoveryServiceClient{cc} +} + +func (c *networkPolicyDiscoveryServiceClient) StreamNetworkPolicies(ctx context.Context, opts ...grpc.CallOption) (NetworkPolicyDiscoveryService_StreamNetworkPoliciesClient, error) { + stream, err := c.cc.NewStream(ctx, &_NetworkPolicyDiscoveryService_serviceDesc.Streams[0], "/cilium.NetworkPolicyDiscoveryService/StreamNetworkPolicies", opts...) + if err != nil { + return nil, err + } + x := &networkPolicyDiscoveryServiceStreamNetworkPoliciesClient{stream} + return x, nil +} + +type NetworkPolicyDiscoveryService_StreamNetworkPoliciesClient interface { + Send(*v33.DiscoveryRequest) error + Recv() (*v33.DiscoveryResponse, error) + grpc.ClientStream +} + +type networkPolicyDiscoveryServiceStreamNetworkPoliciesClient struct { + grpc.ClientStream +} + +func (x *networkPolicyDiscoveryServiceStreamNetworkPoliciesClient) Send(m *v33.DiscoveryRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *networkPolicyDiscoveryServiceStreamNetworkPoliciesClient) Recv() (*v33.DiscoveryResponse, error) { + m := new(v33.DiscoveryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *networkPolicyDiscoveryServiceClient) FetchNetworkPolicies(ctx context.Context, in *v33.DiscoveryRequest, opts ...grpc.CallOption) (*v33.DiscoveryResponse, error) { + out := new(v33.DiscoveryResponse) + err := c.cc.Invoke(ctx, "/cilium.NetworkPolicyDiscoveryService/FetchNetworkPolicies", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NetworkPolicyDiscoveryServiceServer is the server API for NetworkPolicyDiscoveryService service. +type NetworkPolicyDiscoveryServiceServer interface { + StreamNetworkPolicies(NetworkPolicyDiscoveryService_StreamNetworkPoliciesServer) error + FetchNetworkPolicies(context.Context, *v33.DiscoveryRequest) (*v33.DiscoveryResponse, error) +} + +// UnimplementedNetworkPolicyDiscoveryServiceServer can be embedded to have forward compatible implementations. 
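A minimal sketch of calling this client API over an existing connection,
relying only on the imports this generated file already uses (context, grpc,
and the envoy discovery types aliased v33); the empty DiscoveryRequest and the
WaitForReady call option are illustrative choices, not requirements of the
service:

	func exampleFetch(ctx context.Context, cc grpc.ClientConnInterface) (*v33.DiscoveryResponse, error) {
		// NewNetworkPolicyDiscoveryServiceClient accepts any
		// ClientConnInterface, e.g. a *grpc.ClientConn dialed elsewhere.
		client := NewNetworkPolicyDiscoveryServiceClient(cc)
		return client.FetchNetworkPolicies(ctx, &v33.DiscoveryRequest{}, grpc.WaitForReady(true))
	}

The bidirectionally streaming StreamNetworkPolicies variant instead returns a
stream whose Send and Recv methods are defined above.
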
+type UnimplementedNetworkPolicyDiscoveryServiceServer struct { +} + +func (*UnimplementedNetworkPolicyDiscoveryServiceServer) StreamNetworkPolicies(NetworkPolicyDiscoveryService_StreamNetworkPoliciesServer) error { + return status.Errorf(codes.Unimplemented, "method StreamNetworkPolicies not implemented") +} +func (*UnimplementedNetworkPolicyDiscoveryServiceServer) FetchNetworkPolicies(context.Context, *v33.DiscoveryRequest) (*v33.DiscoveryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FetchNetworkPolicies not implemented") +} + +func RegisterNetworkPolicyDiscoveryServiceServer(s *grpc.Server, srv NetworkPolicyDiscoveryServiceServer) { + s.RegisterService(&_NetworkPolicyDiscoveryService_serviceDesc, srv) +} + +func _NetworkPolicyDiscoveryService_StreamNetworkPolicies_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(NetworkPolicyDiscoveryServiceServer).StreamNetworkPolicies(&networkPolicyDiscoveryServiceStreamNetworkPoliciesServer{stream}) +} + +type NetworkPolicyDiscoveryService_StreamNetworkPoliciesServer interface { + Send(*v33.DiscoveryResponse) error + Recv() (*v33.DiscoveryRequest, error) + grpc.ServerStream +} + +type networkPolicyDiscoveryServiceStreamNetworkPoliciesServer struct { + grpc.ServerStream +} + +func (x *networkPolicyDiscoveryServiceStreamNetworkPoliciesServer) Send(m *v33.DiscoveryResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *networkPolicyDiscoveryServiceStreamNetworkPoliciesServer) Recv() (*v33.DiscoveryRequest, error) { + m := new(v33.DiscoveryRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _NetworkPolicyDiscoveryService_FetchNetworkPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v33.DiscoveryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NetworkPolicyDiscoveryServiceServer).FetchNetworkPolicies(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cilium.NetworkPolicyDiscoveryService/FetchNetworkPolicies", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NetworkPolicyDiscoveryServiceServer).FetchNetworkPolicies(ctx, req.(*v33.DiscoveryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NetworkPolicyDiscoveryService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cilium.NetworkPolicyDiscoveryService", + HandlerType: (*NetworkPolicyDiscoveryServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "FetchNetworkPolicies", + Handler: _NetworkPolicyDiscoveryService_FetchNetworkPolicies_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamNetworkPolicies", + Handler: _NetworkPolicyDiscoveryService_StreamNetworkPolicies_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "cilium/api/npds.proto", +} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.validate.go new file mode 100644 index 0000000000..70118cedc2 --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.validate.go @@ -0,0 +1,1971 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: cilium/api/npds.proto + +package cilium + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/cilium/proxy/go/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.SocketAddress_Protocol(0) +) + +// Validate checks the field values on NetworkPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *NetworkPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on NetworkPolicy with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in NetworkPolicyMultiError, or +// nil if none found. +func (m *NetworkPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *NetworkPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if l := len(m.GetEndpointIps()); l < 1 || l > 2 { + err := NetworkPolicyValidationError{ + field: "EndpointIps", + reason: "value must contain between 1 and 2 items, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetEndpointIps() { + _, _ = idx, item + + if utf8.RuneCountInString(item) < 1 { + err := NetworkPolicyValidationError{ + field: fmt.Sprintf("EndpointIps[%v]", idx), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for EndpointId + + for idx, item := range m.GetIngressPerPortPolicies() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NetworkPolicyValidationError{ + field: fmt.Sprintf("IngressPerPortPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NetworkPolicyValidationError{ + field: fmt.Sprintf("IngressPerPortPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NetworkPolicyValidationError{ + field: fmt.Sprintf("IngressPerPortPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetEgressPerPortPolicies() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NetworkPolicyValidationError{ + field: fmt.Sprintf("EgressPerPortPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NetworkPolicyValidationError{ + field: fmt.Sprintf("EgressPerPortPolicies[%v]", idx), + 
reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NetworkPolicyValidationError{ + field: fmt.Sprintf("EgressPerPortPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for ConntrackMapName + + if len(errors) > 0 { + return NetworkPolicyMultiError(errors) + } + + return nil +} + +// NetworkPolicyMultiError is an error wrapping multiple validation errors +// returned by NetworkPolicy.ValidateAll() if the designated constraints +// aren't met. +type NetworkPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m NetworkPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NetworkPolicyMultiError) AllErrors() []error { return m } + +// NetworkPolicyValidationError is the validation error returned by +// NetworkPolicy.Validate if the designated constraints aren't met. +type NetworkPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NetworkPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e NetworkPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NetworkPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NetworkPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e NetworkPolicyValidationError) ErrorName() string { return "NetworkPolicyValidationError" } + +// Error satisfies the builtin error interface +func (e NetworkPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNetworkPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NetworkPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NetworkPolicyValidationError{} + +// Validate checks the field values on PortNetworkPolicy with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *PortNetworkPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PortNetworkPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// PortNetworkPolicyMultiError, or nil if none found. 
+func (m *PortNetworkPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *PortNetworkPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetPort() > 65535 { + err := PortNetworkPolicyValidationError{ + field: "Port", + reason: "value must be less than or equal to 65535", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetEndPort() > 65535 { + err := PortNetworkPolicyValidationError{ + field: "EndPort", + reason: "value must be less than or equal to 65535", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Protocol + + for idx, item := range m.GetRules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PortNetworkPolicyValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PortNetworkPolicyValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PortNetworkPolicyValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return PortNetworkPolicyMultiError(errors) + } + + return nil +} + +// PortNetworkPolicyMultiError is an error wrapping multiple validation errors +// returned by PortNetworkPolicy.ValidateAll() if the designated constraints +// aren't met. +type PortNetworkPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PortNetworkPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PortNetworkPolicyMultiError) AllErrors() []error { return m } + +// PortNetworkPolicyValidationError is the validation error returned by +// PortNetworkPolicy.Validate if the designated constraints aren't met. +type PortNetworkPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PortNetworkPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PortNetworkPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PortNetworkPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PortNetworkPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e PortNetworkPolicyValidationError) ErrorName() string { + return "PortNetworkPolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e PortNetworkPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPortNetworkPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PortNetworkPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PortNetworkPolicyValidationError{} + +// Validate checks the field values on TLSContext with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TLSContext) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TLSContext with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TLSContextMultiError, or +// nil if none found. +func (m *TLSContext) ValidateAll() error { + return m.validate(true) +} + +func (m *TLSContext) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TrustedCa + + // no validation rules for CertificateChain + + // no validation rules for PrivateKey + + // no validation rules for ValidationContextSdsSecret + + // no validation rules for TlsSdsSecret + + if len(errors) > 0 { + return TLSContextMultiError(errors) + } + + return nil +} + +// TLSContextMultiError is an error wrapping multiple validation errors +// returned by TLSContext.ValidateAll() if the designated constraints aren't met. +type TLSContextMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TLSContextMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TLSContextMultiError) AllErrors() []error { return m } + +// TLSContextValidationError is the validation error returned by +// TLSContext.Validate if the designated constraints aren't met. +type TLSContextValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TLSContextValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TLSContextValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TLSContextValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TLSContextValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TLSContextValidationError) ErrorName() string { return "TLSContextValidationError" } + +// Error satisfies the builtin error interface +func (e TLSContextValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTLSContext.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TLSContextValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TLSContextValidationError{} + +// Validate checks the field values on PortNetworkPolicyRule with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *PortNetworkPolicyRule) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PortNetworkPolicyRule with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// PortNetworkPolicyRuleMultiError, or nil if none found. +func (m *PortNetworkPolicyRule) ValidateAll() error { + return m.validate(true) +} + +func (m *PortNetworkPolicyRule) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Deny + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetDownstreamTlsContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PortNetworkPolicyRuleValidationError{ + field: "DownstreamTlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PortNetworkPolicyRuleValidationError{ + field: "DownstreamTlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDownstreamTlsContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PortNetworkPolicyRuleValidationError{ + field: "DownstreamTlsContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpstreamTlsContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PortNetworkPolicyRuleValidationError{ + field: "UpstreamTlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PortNetworkPolicyRuleValidationError{ + field: "UpstreamTlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamTlsContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PortNetworkPolicyRuleValidationError{ + field: "UpstreamTlsContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for L7Proto + + switch v := m.L7.(type) { + case *PortNetworkPolicyRule_HttpRules: + if v == nil { + err := PortNetworkPolicyRuleValidationError{ + field: "L7", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v 
:= interface{}(m.GetHttpRules()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PortNetworkPolicyRuleValidationError{ + field: "HttpRules", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PortNetworkPolicyRuleValidationError{ + field: "HttpRules", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpRules()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PortNetworkPolicyRuleValidationError{ + field: "HttpRules", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *PortNetworkPolicyRule_KafkaRules: + if v == nil { + err := PortNetworkPolicyRuleValidationError{ + field: "L7", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetKafkaRules()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PortNetworkPolicyRuleValidationError{ + field: "KafkaRules", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PortNetworkPolicyRuleValidationError{ + field: "KafkaRules", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKafkaRules()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PortNetworkPolicyRuleValidationError{ + field: "KafkaRules", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *PortNetworkPolicyRule_L7Rules: + if v == nil { + err := PortNetworkPolicyRuleValidationError{ + field: "L7", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetL7Rules()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PortNetworkPolicyRuleValidationError{ + field: "L7Rules", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PortNetworkPolicyRuleValidationError{ + field: "L7Rules", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetL7Rules()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PortNetworkPolicyRuleValidationError{ + field: "L7Rules", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return PortNetworkPolicyRuleMultiError(errors) + } + + return nil +} + +// PortNetworkPolicyRuleMultiError is an error wrapping multiple validation +// errors returned by PortNetworkPolicyRule.ValidateAll() if the designated +// constraints aren't met. +type PortNetworkPolicyRuleMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m PortNetworkPolicyRuleMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PortNetworkPolicyRuleMultiError) AllErrors() []error { return m } + +// PortNetworkPolicyRuleValidationError is the validation error returned by +// PortNetworkPolicyRule.Validate if the designated constraints aren't met. +type PortNetworkPolicyRuleValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PortNetworkPolicyRuleValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PortNetworkPolicyRuleValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PortNetworkPolicyRuleValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PortNetworkPolicyRuleValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PortNetworkPolicyRuleValidationError) ErrorName() string { + return "PortNetworkPolicyRuleValidationError" +} + +// Error satisfies the builtin error interface +func (e PortNetworkPolicyRuleValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPortNetworkPolicyRule.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PortNetworkPolicyRuleValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PortNetworkPolicyRuleValidationError{} + +// Validate checks the field values on HttpNetworkPolicyRules with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpNetworkPolicyRules) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpNetworkPolicyRules with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpNetworkPolicyRulesMultiError, or nil if none found. 
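+//
+// For example (values below are illustrative):
+//
+//	httpRules := &HttpNetworkPolicyRules{
+//		HttpRules: []*HttpNetworkPolicyRule{{}}, // at least one entry is required
+//	}
+//	if err := httpRules.ValidateAll(); err != nil {
+//		fmt.Println(err)
+//	}
+//	// The rules attach to a port rule through the generated oneof wrapper:
+//	rule := &PortNetworkPolicyRule{
+//		L7: &PortNetworkPolicyRule_HttpRules{HttpRules: httpRules},
+//	}
+//	_ = rule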
+func (m *HttpNetworkPolicyRules) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpNetworkPolicyRules) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetHttpRules()) < 1 { + err := HttpNetworkPolicyRulesValidationError{ + field: "HttpRules", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetHttpRules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpNetworkPolicyRulesValidationError{ + field: fmt.Sprintf("HttpRules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpNetworkPolicyRulesValidationError{ + field: fmt.Sprintf("HttpRules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpNetworkPolicyRulesValidationError{ + field: fmt.Sprintf("HttpRules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpNetworkPolicyRulesMultiError(errors) + } + + return nil +} + +// HttpNetworkPolicyRulesMultiError is an error wrapping multiple validation +// errors returned by HttpNetworkPolicyRules.ValidateAll() if the designated +// constraints aren't met. +type HttpNetworkPolicyRulesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpNetworkPolicyRulesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpNetworkPolicyRulesMultiError) AllErrors() []error { return m } + +// HttpNetworkPolicyRulesValidationError is the validation error returned by +// HttpNetworkPolicyRules.Validate if the designated constraints aren't met. +type HttpNetworkPolicyRulesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpNetworkPolicyRulesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpNetworkPolicyRulesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpNetworkPolicyRulesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpNetworkPolicyRulesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpNetworkPolicyRulesValidationError) ErrorName() string { + return "HttpNetworkPolicyRulesValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpNetworkPolicyRulesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpNetworkPolicyRules.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpNetworkPolicyRulesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpNetworkPolicyRulesValidationError{} + +// Validate checks the field values on HeaderMatch with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HeaderMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderMatch with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HeaderMatchMultiError, or +// nil if none found. +func (m *HeaderMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := HeaderMatchValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Value + + // no validation rules for MatchAction + + // no validation rules for MismatchAction + + // no validation rules for ValueSdsSecret + + if len(errors) > 0 { + return HeaderMatchMultiError(errors) + } + + return nil +} + +// HeaderMatchMultiError is an error wrapping multiple validation errors +// returned by HeaderMatch.ValidateAll() if the designated constraints aren't met. +type HeaderMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HeaderMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HeaderMatchMultiError) AllErrors() []error { return m } + +// HeaderMatchValidationError is the validation error returned by +// HeaderMatch.Validate if the designated constraints aren't met. +type HeaderMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HeaderMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HeaderMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HeaderMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HeaderMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
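+//
+// For instance, an empty Name violates the minimum-length rule, and the
+// resulting error can be inspected field by field (a sketch):
+//
+//	err := (&HeaderMatch{}).Validate()
+//	var ve HeaderMatchValidationError
+//	if errors.As(err, &ve) {
+//		fmt.Println(ve.ErrorName(), ve.Field(), ve.Reason())
+//		// Output: HeaderMatchValidationError Name value length must be at least 1 runes
+//	}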
+func (e HeaderMatchValidationError) ErrorName() string { return "HeaderMatchValidationError" } + +// Error satisfies the builtin error interface +func (e HeaderMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHeaderMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HeaderMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HeaderMatchValidationError{} + +// Validate checks the field values on HttpNetworkPolicyRule with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpNetworkPolicyRule) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpNetworkPolicyRule with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpNetworkPolicyRuleMultiError, or nil if none found. +func (m *HttpNetworkPolicyRule) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpNetworkPolicyRule) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpNetworkPolicyRuleValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpNetworkPolicyRuleValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpNetworkPolicyRuleValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetHeaderMatches() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpNetworkPolicyRuleValidationError{ + field: fmt.Sprintf("HeaderMatches[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpNetworkPolicyRuleValidationError{ + field: fmt.Sprintf("HeaderMatches[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpNetworkPolicyRuleValidationError{ + field: fmt.Sprintf("HeaderMatches[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpNetworkPolicyRuleMultiError(errors) + } + + return nil +} + +// HttpNetworkPolicyRuleMultiError is an error wrapping multiple validation +// errors returned by HttpNetworkPolicyRule.ValidateAll() if the designated +// 
constraints aren't met. +type HttpNetworkPolicyRuleMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpNetworkPolicyRuleMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpNetworkPolicyRuleMultiError) AllErrors() []error { return m } + +// HttpNetworkPolicyRuleValidationError is the validation error returned by +// HttpNetworkPolicyRule.Validate if the designated constraints aren't met. +type HttpNetworkPolicyRuleValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpNetworkPolicyRuleValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpNetworkPolicyRuleValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpNetworkPolicyRuleValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpNetworkPolicyRuleValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpNetworkPolicyRuleValidationError) ErrorName() string { + return "HttpNetworkPolicyRuleValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpNetworkPolicyRuleValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpNetworkPolicyRule.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpNetworkPolicyRuleValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpNetworkPolicyRuleValidationError{} + +// Validate checks the field values on KafkaNetworkPolicyRules with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *KafkaNetworkPolicyRules) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KafkaNetworkPolicyRules with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// KafkaNetworkPolicyRulesMultiError, or nil if none found. 
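+//
+// For example, topic and client-ID values are checked against the generated
+// regex patterns (values below are illustrative):
+//
+//	rules := &KafkaNetworkPolicyRules{
+//		KafkaRules: []*KafkaNetworkPolicyRule{
+//			{Topic: "payments.v1"}, // matches ^[a-zA-Z0-9._-]*$
+//			{Topic: "bad topic!"},  // space and '!' violate the pattern
+//		},
+//	}
+//	if err := rules.ValidateAll(); err != nil {
+//		fmt.Println(err) // reports the second rule's Topic as invalid
+//	}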
+func (m *KafkaNetworkPolicyRules) ValidateAll() error { + return m.validate(true) +} + +func (m *KafkaNetworkPolicyRules) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetKafkaRules()) < 1 { + err := KafkaNetworkPolicyRulesValidationError{ + field: "KafkaRules", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetKafkaRules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, KafkaNetworkPolicyRulesValidationError{ + field: fmt.Sprintf("KafkaRules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, KafkaNetworkPolicyRulesValidationError{ + field: fmt.Sprintf("KafkaRules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return KafkaNetworkPolicyRulesValidationError{ + field: fmt.Sprintf("KafkaRules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return KafkaNetworkPolicyRulesMultiError(errors) + } + + return nil +} + +// KafkaNetworkPolicyRulesMultiError is an error wrapping multiple validation +// errors returned by KafkaNetworkPolicyRules.ValidateAll() if the designated +// constraints aren't met. +type KafkaNetworkPolicyRulesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m KafkaNetworkPolicyRulesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KafkaNetworkPolicyRulesMultiError) AllErrors() []error { return m } + +// KafkaNetworkPolicyRulesValidationError is the validation error returned by +// KafkaNetworkPolicyRules.Validate if the designated constraints aren't met. +type KafkaNetworkPolicyRulesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e KafkaNetworkPolicyRulesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e KafkaNetworkPolicyRulesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KafkaNetworkPolicyRulesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e KafkaNetworkPolicyRulesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e KafkaNetworkPolicyRulesValidationError) ErrorName() string { + return "KafkaNetworkPolicyRulesValidationError" +} + +// Error satisfies the builtin error interface +func (e KafkaNetworkPolicyRulesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKafkaNetworkPolicyRules.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KafkaNetworkPolicyRulesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KafkaNetworkPolicyRulesValidationError{} + +// Validate checks the field values on KafkaNetworkPolicyRule with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *KafkaNetworkPolicyRule) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KafkaNetworkPolicyRule with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// KafkaNetworkPolicyRuleMultiError, or nil if none found. +func (m *KafkaNetworkPolicyRule) ValidateAll() error { + return m.validate(true) +} + +func (m *KafkaNetworkPolicyRule) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ApiVersion + + if !_KafkaNetworkPolicyRule_ClientId_Pattern.MatchString(m.GetClientId()) { + err := KafkaNetworkPolicyRuleValidationError{ + field: "ClientId", + reason: "value does not match regex pattern \"^[a-zA-Z0-9._-]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetTopic()) > 255 { + err := KafkaNetworkPolicyRuleValidationError{ + field: "Topic", + reason: "value length must be at most 255 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_KafkaNetworkPolicyRule_Topic_Pattern.MatchString(m.GetTopic()) { + err := KafkaNetworkPolicyRuleValidationError{ + field: "Topic", + reason: "value does not match regex pattern \"^[a-zA-Z0-9._-]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return KafkaNetworkPolicyRuleMultiError(errors) + } + + return nil +} + +// KafkaNetworkPolicyRuleMultiError is an error wrapping multiple validation +// errors returned by KafkaNetworkPolicyRule.ValidateAll() if the designated +// constraints aren't met. +type KafkaNetworkPolicyRuleMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m KafkaNetworkPolicyRuleMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KafkaNetworkPolicyRuleMultiError) AllErrors() []error { return m } + +// KafkaNetworkPolicyRuleValidationError is the validation error returned by +// KafkaNetworkPolicyRule.Validate if the designated constraints aren't met. +type KafkaNetworkPolicyRuleValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e KafkaNetworkPolicyRuleValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e KafkaNetworkPolicyRuleValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KafkaNetworkPolicyRuleValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e KafkaNetworkPolicyRuleValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e KafkaNetworkPolicyRuleValidationError) ErrorName() string { + return "KafkaNetworkPolicyRuleValidationError" +} + +// Error satisfies the builtin error interface +func (e KafkaNetworkPolicyRuleValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKafkaNetworkPolicyRule.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KafkaNetworkPolicyRuleValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KafkaNetworkPolicyRuleValidationError{} + +var _KafkaNetworkPolicyRule_ClientId_Pattern = regexp.MustCompile("^[a-zA-Z0-9._-]*$") + +var _KafkaNetworkPolicyRule_Topic_Pattern = regexp.MustCompile("^[a-zA-Z0-9._-]*$") + +// Validate checks the field values on L7NetworkPolicyRules with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *L7NetworkPolicyRules) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on L7NetworkPolicyRules with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// L7NetworkPolicyRulesMultiError, or nil if none found. 
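+//
+// Note that, as with every message in this file, validating a nil receiver is
+// a no-op (a sketch):
+//
+//	var rules *L7NetworkPolicyRules
+//	_ = rules.Validate()    // returns nil: a nil message has nothing to check
+//	_ = rules.ValidateAll() // likewise nil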
+func (m *L7NetworkPolicyRules) ValidateAll() error { + return m.validate(true) +} + +func (m *L7NetworkPolicyRules) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetL7AllowRules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, L7NetworkPolicyRulesValidationError{ + field: fmt.Sprintf("L7AllowRules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, L7NetworkPolicyRulesValidationError{ + field: fmt.Sprintf("L7AllowRules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return L7NetworkPolicyRulesValidationError{ + field: fmt.Sprintf("L7AllowRules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetL7DenyRules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, L7NetworkPolicyRulesValidationError{ + field: fmt.Sprintf("L7DenyRules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, L7NetworkPolicyRulesValidationError{ + field: fmt.Sprintf("L7DenyRules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return L7NetworkPolicyRulesValidationError{ + field: fmt.Sprintf("L7DenyRules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return L7NetworkPolicyRulesMultiError(errors) + } + + return nil +} + +// L7NetworkPolicyRulesMultiError is an error wrapping multiple validation +// errors returned by L7NetworkPolicyRules.ValidateAll() if the designated +// constraints aren't met. +type L7NetworkPolicyRulesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m L7NetworkPolicyRulesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m L7NetworkPolicyRulesMultiError) AllErrors() []error { return m } + +// L7NetworkPolicyRulesValidationError is the validation error returned by +// L7NetworkPolicyRules.Validate if the designated constraints aren't met. +type L7NetworkPolicyRulesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e L7NetworkPolicyRulesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e L7NetworkPolicyRulesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e L7NetworkPolicyRulesValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e L7NetworkPolicyRulesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e L7NetworkPolicyRulesValidationError) ErrorName() string { + return "L7NetworkPolicyRulesValidationError" +} + +// Error satisfies the builtin error interface +func (e L7NetworkPolicyRulesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sL7NetworkPolicyRules.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = L7NetworkPolicyRulesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = L7NetworkPolicyRulesValidationError{} + +// Validate checks the field values on L7NetworkPolicyRule with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *L7NetworkPolicyRule) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on L7NetworkPolicyRule with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// L7NetworkPolicyRuleMultiError, or nil if none found. +func (m *L7NetworkPolicyRule) ValidateAll() error { + return m.validate(true) +} + +func (m *L7NetworkPolicyRule) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for Rule + + for idx, item := range m.GetMetadataRule() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, L7NetworkPolicyRuleValidationError{ + field: fmt.Sprintf("MetadataRule[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, L7NetworkPolicyRuleValidationError{ + field: fmt.Sprintf("MetadataRule[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return L7NetworkPolicyRuleValidationError{ + field: fmt.Sprintf("MetadataRule[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return L7NetworkPolicyRuleMultiError(errors) + } + + return nil +} + +// L7NetworkPolicyRuleMultiError is an error wrapping multiple validation +// errors returned by L7NetworkPolicyRule.ValidateAll() if the designated +// constraints aren't met. +type L7NetworkPolicyRuleMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m L7NetworkPolicyRuleMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m L7NetworkPolicyRuleMultiError) AllErrors() []error { return m } + +// L7NetworkPolicyRuleValidationError is the validation error returned by +// L7NetworkPolicyRule.Validate if the designated constraints aren't met. 
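+//
+// A rendered message names the offending field and chains any nested cause,
+// e.g. (shape per the Error method below; the trailing cause text is illustrative):
+//
+//	invalid L7NetworkPolicyRule.MetadataRule[0]: embedded message failed validation | caused by: ...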
+type L7NetworkPolicyRuleValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e L7NetworkPolicyRuleValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e L7NetworkPolicyRuleValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e L7NetworkPolicyRuleValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e L7NetworkPolicyRuleValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e L7NetworkPolicyRuleValidationError) ErrorName() string { + return "L7NetworkPolicyRuleValidationError" +} + +// Error satisfies the builtin error interface +func (e L7NetworkPolicyRuleValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sL7NetworkPolicyRule.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = L7NetworkPolicyRuleValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = L7NetworkPolicyRuleValidationError{} + +// Validate checks the field values on NetworkPoliciesConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *NetworkPoliciesConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on NetworkPoliciesConfigDump with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// NetworkPoliciesConfigDumpMultiError, or nil if none found. +func (m *NetworkPoliciesConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *NetworkPoliciesConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetNetworkpolicies() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NetworkPoliciesConfigDumpValidationError{ + field: fmt.Sprintf("Networkpolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NetworkPoliciesConfigDumpValidationError{ + field: fmt.Sprintf("Networkpolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NetworkPoliciesConfigDumpValidationError{ + field: fmt.Sprintf("Networkpolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return NetworkPoliciesConfigDumpMultiError(errors) + } + + return nil +} + +// NetworkPoliciesConfigDumpMultiError is an error wrapping multiple validation +// errors returned by NetworkPoliciesConfigDump.ValidateAll() if the +// designated constraints aren't met. +type NetworkPoliciesConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m NetworkPoliciesConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NetworkPoliciesConfigDumpMultiError) AllErrors() []error { return m } + +// NetworkPoliciesConfigDumpValidationError is the validation error returned by +// NetworkPoliciesConfigDump.Validate if the designated constraints aren't met. +type NetworkPoliciesConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NetworkPoliciesConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e NetworkPoliciesConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NetworkPoliciesConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NetworkPoliciesConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e NetworkPoliciesConfigDumpValidationError) ErrorName() string { + return "NetworkPoliciesConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e NetworkPoliciesConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNetworkPoliciesConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NetworkPoliciesConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NetworkPoliciesConfigDumpValidationError{} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.go new file mode 100644 index 0000000000..0f3b9c65e0 --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.go @@ -0,0 +1,362 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v4.23.1 +// source: cilium/api/nphds.proto + +package cilium + +import ( + context "context" + _ "github.com/cilium/proxy/go/envoy/annotations" + v3 "github.com/cilium/proxy/go/envoy/service/discovery/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + proto "github.com/golang/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// The mapping of a network policy identifier to the IP addresses of all the +// hosts on which the network policy is enforced. +// A host may be associated only with one network policy. 
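+//
+// For example (addresses are illustrative):
+//
+//	hosts := &NetworkPolicyHosts{
+//		Policy:        42,
+//		HostAddresses: []string{"10.0.0.1", "10.0.0.2"},
+//	}
+//	fmt.Println(hosts.GetPolicy(), hosts.GetHostAddresses())
+//	// The getters are nil-safe: (*NetworkPolicyHosts)(nil).GetPolicy() == 0.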
+type NetworkPolicyHosts struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The unique identifier of the network policy enforced on the hosts. + Policy uint64 `protobuf:"varint,1,opt,name=policy,proto3" json:"policy,omitempty"` + // The set of IP addresses of the hosts on which the network policy is + // enforced. Optional. May be empty. + HostAddresses []string `protobuf:"bytes,2,rep,name=host_addresses,json=hostAddresses,proto3" json:"host_addresses,omitempty"` +} + +func (x *NetworkPolicyHosts) Reset() { + *x = NetworkPolicyHosts{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_nphds_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkPolicyHosts) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkPolicyHosts) ProtoMessage() {} + +func (x *NetworkPolicyHosts) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_nphds_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkPolicyHosts.ProtoReflect.Descriptor instead. +func (*NetworkPolicyHosts) Descriptor() ([]byte, []int) { + return file_cilium_api_nphds_proto_rawDescGZIP(), []int{0} +} + +func (x *NetworkPolicyHosts) GetPolicy() uint64 { + if x != nil { + return x.Policy + } + return 0 +} + +func (x *NetworkPolicyHosts) GetHostAddresses() []string { + if x != nil { + return x.HostAddresses + } + return nil +} + +var File_cilium_api_nphds_proto protoreflect.FileDescriptor + +var file_cilium_api_nphds_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6e, 0x70, 0x68, + 0x64, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, + 0x1a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, + 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x69, 0x0a, 0x12, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x3b, 0x0a, 0x0e, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x14, 0xfa, 0x42, 0x05, + 0x92, 0x01, 0x02, 0x18, 0x01, 0xfa, 0x42, 0x09, 0x92, 0x01, 0x06, 0x22, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, + 0x32, 0xee, 0x02, 0x0a, 0x22, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, + 
0x63, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7d, 0x0a, 0x18, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x6f, + 0x73, 0x74, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, + 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0xa5, 0x01, 0x0a, 0x17, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x6f, 0x73, + 0x74, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, + 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x2d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x22, 0x22, 0x2f, 0x76, 0x32, 0x2f, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x3a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x21, + 0x8a, 0xa4, 0x96, 0xf3, 0x07, 0x1b, 0x0a, 0x19, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x6f, 0x73, 0x74, + 0x73, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2f, + 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69, 0x75, + 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cilium_api_nphds_proto_rawDescOnce sync.Once + file_cilium_api_nphds_proto_rawDescData = file_cilium_api_nphds_proto_rawDesc +) + +func file_cilium_api_nphds_proto_rawDescGZIP() []byte { + file_cilium_api_nphds_proto_rawDescOnce.Do(func() { + file_cilium_api_nphds_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_nphds_proto_rawDescData) + }) + return file_cilium_api_nphds_proto_rawDescData +} + +var file_cilium_api_nphds_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_cilium_api_nphds_proto_goTypes = []interface{}{ + (*NetworkPolicyHosts)(nil), // 0: cilium.NetworkPolicyHosts + (*v3.DiscoveryRequest)(nil), // 1: envoy.service.discovery.v3.DiscoveryRequest + (*v3.DiscoveryResponse)(nil), // 2: envoy.service.discovery.v3.DiscoveryResponse +} +var file_cilium_api_nphds_proto_depIdxs = []int32{ + 1, // 0: cilium.NetworkPolicyHostsDiscoveryService.StreamNetworkPolicyHosts:input_type -> envoy.service.discovery.v3.DiscoveryRequest + 1, // 1: 
cilium.NetworkPolicyHostsDiscoveryService.FetchNetworkPolicyHosts:input_type -> envoy.service.discovery.v3.DiscoveryRequest + 2, // 2: cilium.NetworkPolicyHostsDiscoveryService.StreamNetworkPolicyHosts:output_type -> envoy.service.discovery.v3.DiscoveryResponse + 2, // 3: cilium.NetworkPolicyHostsDiscoveryService.FetchNetworkPolicyHosts:output_type -> envoy.service.discovery.v3.DiscoveryResponse + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_cilium_api_nphds_proto_init() } +func file_cilium_api_nphds_proto_init() { + if File_cilium_api_nphds_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cilium_api_nphds_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkPolicyHosts); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cilium_api_nphds_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_cilium_api_nphds_proto_goTypes, + DependencyIndexes: file_cilium_api_nphds_proto_depIdxs, + MessageInfos: file_cilium_api_nphds_proto_msgTypes, + }.Build() + File_cilium_api_nphds_proto = out.File + file_cilium_api_nphds_proto_rawDesc = nil + file_cilium_api_nphds_proto_goTypes = nil + file_cilium_api_nphds_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// NetworkPolicyHostsDiscoveryServiceClient is the client API for NetworkPolicyHostsDiscoveryService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type NetworkPolicyHostsDiscoveryServiceClient interface { + StreamNetworkPolicyHosts(ctx context.Context, opts ...grpc.CallOption) (NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsClient, error) + FetchNetworkPolicyHosts(ctx context.Context, in *v3.DiscoveryRequest, opts ...grpc.CallOption) (*v3.DiscoveryResponse, error) +} + +type networkPolicyHostsDiscoveryServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewNetworkPolicyHostsDiscoveryServiceClient(cc grpc.ClientConnInterface) NetworkPolicyHostsDiscoveryServiceClient { + return &networkPolicyHostsDiscoveryServiceClient{cc} +} + +func (c *networkPolicyHostsDiscoveryServiceClient) StreamNetworkPolicyHosts(ctx context.Context, opts ...grpc.CallOption) (NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsClient, error) { + stream, err := c.cc.NewStream(ctx, &_NetworkPolicyHostsDiscoveryService_serviceDesc.Streams[0], "/cilium.NetworkPolicyHostsDiscoveryService/StreamNetworkPolicyHosts", opts...) 
+ if err != nil { + return nil, err + } + x := &networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsClient{stream} + return x, nil +} + +type NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsClient interface { + Send(*v3.DiscoveryRequest) error + Recv() (*v3.DiscoveryResponse, error) + grpc.ClientStream +} + +type networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsClient struct { + grpc.ClientStream +} + +func (x *networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsClient) Send(m *v3.DiscoveryRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsClient) Recv() (*v3.DiscoveryResponse, error) { + m := new(v3.DiscoveryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *networkPolicyHostsDiscoveryServiceClient) FetchNetworkPolicyHosts(ctx context.Context, in *v3.DiscoveryRequest, opts ...grpc.CallOption) (*v3.DiscoveryResponse, error) { + out := new(v3.DiscoveryResponse) + err := c.cc.Invoke(ctx, "/cilium.NetworkPolicyHostsDiscoveryService/FetchNetworkPolicyHosts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NetworkPolicyHostsDiscoveryServiceServer is the server API for NetworkPolicyHostsDiscoveryService service. +type NetworkPolicyHostsDiscoveryServiceServer interface { + StreamNetworkPolicyHosts(NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsServer) error + FetchNetworkPolicyHosts(context.Context, *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) +} + +// UnimplementedNetworkPolicyHostsDiscoveryServiceServer can be embedded to have forward compatible implementations. +type UnimplementedNetworkPolicyHostsDiscoveryServiceServer struct { +} + +func (*UnimplementedNetworkPolicyHostsDiscoveryServiceServer) StreamNetworkPolicyHosts(NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsServer) error { + return status.Errorf(codes.Unimplemented, "method StreamNetworkPolicyHosts not implemented") +} +func (*UnimplementedNetworkPolicyHostsDiscoveryServiceServer) FetchNetworkPolicyHosts(context.Context, *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FetchNetworkPolicyHosts not implemented") +} + +func RegisterNetworkPolicyHostsDiscoveryServiceServer(s *grpc.Server, srv NetworkPolicyHostsDiscoveryServiceServer) { + s.RegisterService(&_NetworkPolicyHostsDiscoveryService_serviceDesc, srv) +} + +func _NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHosts_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(NetworkPolicyHostsDiscoveryServiceServer).StreamNetworkPolicyHosts(&networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsServer{stream}) +} + +type NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsServer interface { + Send(*v3.DiscoveryResponse) error + Recv() (*v3.DiscoveryRequest, error) + grpc.ServerStream +} + +type networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsServer struct { + grpc.ServerStream +} + +func (x *networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsServer) Send(m *v3.DiscoveryResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsServer) Recv() (*v3.DiscoveryRequest, error) { + m := new(v3.DiscoveryRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func 
_NetworkPolicyHostsDiscoveryService_FetchNetworkPolicyHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v3.DiscoveryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NetworkPolicyHostsDiscoveryServiceServer).FetchNetworkPolicyHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cilium.NetworkPolicyHostsDiscoveryService/FetchNetworkPolicyHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NetworkPolicyHostsDiscoveryServiceServer).FetchNetworkPolicyHosts(ctx, req.(*v3.DiscoveryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NetworkPolicyHostsDiscoveryService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cilium.NetworkPolicyHostsDiscoveryService", + HandlerType: (*NetworkPolicyHostsDiscoveryServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "FetchNetworkPolicyHosts", + Handler: _NetworkPolicyHostsDiscoveryService_FetchNetworkPolicyHosts_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamNetworkPolicyHosts", + Handler: _NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHosts_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "cilium/api/nphds.proto", +} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.validate.go new file mode 100644 index 0000000000..6ea2012214 --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.validate.go @@ -0,0 +1,171 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: cilium/api/nphds.proto + +package cilium + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on NetworkPolicyHosts with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *NetworkPolicyHosts) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on NetworkPolicyHosts with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// NetworkPolicyHostsMultiError, or nil if none found. 
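Per the rules encoded in the raw descriptor above, host_addresses carries two constraints: entries must be unique, and each entry must be at least one rune long. The validate method that follows enforces both; a sketch of the two rules tripping (hypothetical usage):

package main

import (
	"fmt"

	cilium "github.com/cilium/proxy/go/cilium/api"
)

func main() {
	bad := &cilium.NetworkPolicyHosts{
		Policy:        7,
		HostAddresses: []string{"10.0.0.1", "10.0.0.1", ""},
	}
	// ValidateAll reports the duplicate and the empty entry in one
	// NetworkPolicyHostsMultiError, joined with "; " by Error().
	if err := bad.ValidateAll(); err != nil {
		fmt.Println(err)
	}
}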
+func (m *NetworkPolicyHosts) ValidateAll() error { + return m.validate(true) +} + +func (m *NetworkPolicyHosts) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Policy + + _NetworkPolicyHosts_HostAddresses_Unique := make(map[string]struct{}, len(m.GetHostAddresses())) + + for idx, item := range m.GetHostAddresses() { + _, _ = idx, item + + if _, exists := _NetworkPolicyHosts_HostAddresses_Unique[item]; exists { + err := NetworkPolicyHostsValidationError{ + field: fmt.Sprintf("HostAddresses[%v]", idx), + reason: "repeated value must contain unique items", + } + if !all { + return err + } + errors = append(errors, err) + } else { + _NetworkPolicyHosts_HostAddresses_Unique[item] = struct{}{} + } + + if utf8.RuneCountInString(item) < 1 { + err := NetworkPolicyHostsValidationError{ + field: fmt.Sprintf("HostAddresses[%v]", idx), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return NetworkPolicyHostsMultiError(errors) + } + + return nil +} + +// NetworkPolicyHostsMultiError is an error wrapping multiple validation errors +// returned by NetworkPolicyHosts.ValidateAll() if the designated constraints +// aren't met. +type NetworkPolicyHostsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m NetworkPolicyHostsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NetworkPolicyHostsMultiError) AllErrors() []error { return m } + +// NetworkPolicyHostsValidationError is the validation error returned by +// NetworkPolicyHosts.Validate if the designated constraints aren't met. +type NetworkPolicyHostsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NetworkPolicyHostsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e NetworkPolicyHostsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NetworkPolicyHostsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NetworkPolicyHostsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e NetworkPolicyHostsValidationError) ErrorName() string { + return "NetworkPolicyHostsValidationError" +} + +// Error satisfies the builtin error interface +func (e NetworkPolicyHostsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNetworkPolicyHosts.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NetworkPolicyHostsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NetworkPolicyHostsValidationError{} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.go new file mode 100644 index 0000000000..e46dbabcd3 --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.go @@ -0,0 +1,194 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.23.0 +// protoc v4.23.1 +// source: cilium/api/tls_wrapper.proto + +package cilium + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// Empty configuration messages for Cilium TLS wrapper to make Envoy happy +type UpstreamTlsWrapperContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpstreamTlsWrapperContext) Reset() { + *x = UpstreamTlsWrapperContext{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_tls_wrapper_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamTlsWrapperContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamTlsWrapperContext) ProtoMessage() {} + +func (x *UpstreamTlsWrapperContext) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_tls_wrapper_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamTlsWrapperContext.ProtoReflect.Descriptor instead. +func (*UpstreamTlsWrapperContext) Descriptor() ([]byte, []int) { + return file_cilium_api_tls_wrapper_proto_rawDescGZIP(), []int{0} +} + +type DownstreamTlsWrapperContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DownstreamTlsWrapperContext) Reset() { + *x = DownstreamTlsWrapperContext{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_tls_wrapper_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DownstreamTlsWrapperContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DownstreamTlsWrapperContext) ProtoMessage() {} + +func (x *DownstreamTlsWrapperContext) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_tls_wrapper_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DownstreamTlsWrapperContext.ProtoReflect.Descriptor instead. 
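Both wrapper contexts above are deliberately empty: Envoy insists on a typed_config message for every transport socket, so the Cilium TLS wrapper ships these placeholders. A sketch of packing one into an Any the way a transport socket's typed_config would carry it (hypothetical usage; import path assumed from this vendor tree):

package main

import (
	"fmt"

	cilium "github.com/cilium/proxy/go/cilium/api"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	// Pack the empty placeholder message as a typed_config payload.
	cfg, err := anypb.New(&cilium.UpstreamTlsWrapperContext{})
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.GetTypeUrl()) // type.googleapis.com/cilium.UpstreamTlsWrapperContext
}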
+func (*DownstreamTlsWrapperContext) Descriptor() ([]byte, []int) { + return file_cilium_api_tls_wrapper_proto_rawDescGZIP(), []int{1} +} + +var File_cilium_api_tls_wrapper_proto protoreflect.FileDescriptor + +var file_cilium_api_tls_wrapper_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x6c, 0x73, + 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, + 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x22, 0x1b, 0x0a, 0x19, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x22, 0x1d, 0x0a, 0x1b, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x54, 0x6c, 0x73, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69, + 0x75, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cilium_api_tls_wrapper_proto_rawDescOnce sync.Once + file_cilium_api_tls_wrapper_proto_rawDescData = file_cilium_api_tls_wrapper_proto_rawDesc +) + +func file_cilium_api_tls_wrapper_proto_rawDescGZIP() []byte { + file_cilium_api_tls_wrapper_proto_rawDescOnce.Do(func() { + file_cilium_api_tls_wrapper_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_tls_wrapper_proto_rawDescData) + }) + return file_cilium_api_tls_wrapper_proto_rawDescData +} + +var file_cilium_api_tls_wrapper_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_cilium_api_tls_wrapper_proto_goTypes = []interface{}{ + (*UpstreamTlsWrapperContext)(nil), // 0: cilium.UpstreamTlsWrapperContext + (*DownstreamTlsWrapperContext)(nil), // 1: cilium.DownstreamTlsWrapperContext +} +var file_cilium_api_tls_wrapper_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_cilium_api_tls_wrapper_proto_init() } +func file_cilium_api_tls_wrapper_proto_init() { + if File_cilium_api_tls_wrapper_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cilium_api_tls_wrapper_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamTlsWrapperContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_tls_wrapper_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DownstreamTlsWrapperContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cilium_api_tls_wrapper_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cilium_api_tls_wrapper_proto_goTypes, + DependencyIndexes: file_cilium_api_tls_wrapper_proto_depIdxs, + MessageInfos: file_cilium_api_tls_wrapper_proto_msgTypes, + }.Build() + 
File_cilium_api_tls_wrapper_proto = out.File + file_cilium_api_tls_wrapper_proto_rawDesc = nil + file_cilium_api_tls_wrapper_proto_goTypes = nil + file_cilium_api_tls_wrapper_proto_depIdxs = nil +} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.validate.go new file mode 100644 index 0000000000..b5da26f8bf --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.validate.go @@ -0,0 +1,241 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: cilium/api/tls_wrapper.proto + +package cilium + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UpstreamTlsWrapperContext with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpstreamTlsWrapperContext) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpstreamTlsWrapperContext with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpstreamTlsWrapperContextMultiError, or nil if none found. +func (m *UpstreamTlsWrapperContext) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamTlsWrapperContext) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return UpstreamTlsWrapperContextMultiError(errors) + } + + return nil +} + +// UpstreamTlsWrapperContextMultiError is an error wrapping multiple validation +// errors returned by UpstreamTlsWrapperContext.ValidateAll() if the +// designated constraints aren't met. +type UpstreamTlsWrapperContextMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpstreamTlsWrapperContextMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamTlsWrapperContextMultiError) AllErrors() []error { return m } + +// UpstreamTlsWrapperContextValidationError is the validation error returned by +// UpstreamTlsWrapperContext.Validate if the designated constraints aren't met. +type UpstreamTlsWrapperContextValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpstreamTlsWrapperContextValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamTlsWrapperContextValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpstreamTlsWrapperContextValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamTlsWrapperContextValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e UpstreamTlsWrapperContextValidationError) ErrorName() string { + return "UpstreamTlsWrapperContextValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamTlsWrapperContextValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamTlsWrapperContext.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamTlsWrapperContextValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamTlsWrapperContextValidationError{} + +// Validate checks the field values on DownstreamTlsWrapperContext with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DownstreamTlsWrapperContext) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DownstreamTlsWrapperContext with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DownstreamTlsWrapperContextMultiError, or nil if none found. +func (m *DownstreamTlsWrapperContext) ValidateAll() error { + return m.validate(true) +} + +func (m *DownstreamTlsWrapperContext) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return DownstreamTlsWrapperContextMultiError(errors) + } + + return nil +} + +// DownstreamTlsWrapperContextMultiError is an error wrapping multiple +// validation errors returned by DownstreamTlsWrapperContext.ValidateAll() if +// the designated constraints aren't met. +type DownstreamTlsWrapperContextMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DownstreamTlsWrapperContextMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DownstreamTlsWrapperContextMultiError) AllErrors() []error { return m } + +// DownstreamTlsWrapperContextValidationError is the validation error returned +// by DownstreamTlsWrapperContext.Validate if the designated constraints +// aren't met. +type DownstreamTlsWrapperContextValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DownstreamTlsWrapperContextValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DownstreamTlsWrapperContextValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DownstreamTlsWrapperContextValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DownstreamTlsWrapperContextValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DownstreamTlsWrapperContextValidationError) ErrorName() string { + return "DownstreamTlsWrapperContextValidationError" +} + +// Error satisfies the builtin error interface +func (e DownstreamTlsWrapperContextValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDownstreamTlsWrapperContext.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DownstreamTlsWrapperContextValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DownstreamTlsWrapperContextValidationError{} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.go new file mode 100644 index 0000000000..9ce27a2f89 --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.go @@ -0,0 +1,411 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v4.23.1 +// source: cilium/api/websocket.proto + +package cilium + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type WebSocketClient struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Path to the unix domain socket for the cilium access log, if any. + AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"` + // Host header value, required. + Host string `protobuf:"bytes,2,opt,name=host,proto3" json:"host,omitempty"` + // Path value. Defaults to "/". + Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` + // sec-websocket-key value to use, defaults to a random key. + Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"` + // Websocket version, defaults to "13". + Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` + // Origin header, if any. + Origin string `protobuf:"bytes,6,opt,name=origin,proto3" json:"origin,omitempty"` + // Websocket handshake timeout, default is 5 seconds. + HandshakeTimeout *durationpb.Duration `protobuf:"bytes,7,opt,name=handshake_timeout,json=handshakeTimeout,proto3" json:"handshake_timeout,omitempty"` + // ping interval, default is 0 (disabled). + // Connection is assumed dead if response is not received before the next ping is to be sent. + PingInterval *durationpb.Duration `protobuf:"bytes,8,opt,name=ping_interval,json=pingInterval,proto3" json:"ping_interval,omitempty"` + // ping only on when idle on both directions. + // ping_interval must be non-zero when this is true. 
+ PingWhenIdle bool `protobuf:"varint,9,opt,name=ping_when_idle,json=pingWhenIdle,proto3" json:"ping_when_idle,omitempty"` +} + +func (x *WebSocketClient) Reset() { + *x = WebSocketClient{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_websocket_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WebSocketClient) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WebSocketClient) ProtoMessage() {} + +func (x *WebSocketClient) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_websocket_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WebSocketClient.ProtoReflect.Descriptor instead. +func (*WebSocketClient) Descriptor() ([]byte, []int) { + return file_cilium_api_websocket_proto_rawDescGZIP(), []int{0} +} + +func (x *WebSocketClient) GetAccessLogPath() string { + if x != nil { + return x.AccessLogPath + } + return "" +} + +func (x *WebSocketClient) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *WebSocketClient) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *WebSocketClient) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *WebSocketClient) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *WebSocketClient) GetOrigin() string { + if x != nil { + return x.Origin + } + return "" +} + +func (x *WebSocketClient) GetHandshakeTimeout() *durationpb.Duration { + if x != nil { + return x.HandshakeTimeout + } + return nil +} + +func (x *WebSocketClient) GetPingInterval() *durationpb.Duration { + if x != nil { + return x.PingInterval + } + return nil +} + +func (x *WebSocketClient) GetPingWhenIdle() bool { + if x != nil { + return x.PingWhenIdle + } + return false +} + +type WebSocketServer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Path to the unix domain socket for the cilium access log, if any. + AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"` + // Expected host header value, if any. + Host string `protobuf:"bytes,2,opt,name=host,proto3" json:"host,omitempty"` + // Expected path value, if any. + Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` + // sec-websocket-key value to expect, if any. + Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"` + // Websocket version, ignored if omitted. + Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` + // Origin header, if any. Origin header is not allowed if omitted. + Origin string `protobuf:"bytes,6,opt,name=origin,proto3" json:"origin,omitempty"` + // Websocket handshake timeout, default is 5 seconds. + HandshakeTimeout *durationpb.Duration `protobuf:"bytes,7,opt,name=handshake_timeout,json=handshakeTimeout,proto3" json:"handshake_timeout,omitempty"` + // ping interval, default is 0 (disabled). + // Connection is assumed dead if response is not received before the next ping is to be sent. + PingInterval *durationpb.Duration `protobuf:"bytes,8,opt,name=ping_interval,json=pingInterval,proto3" json:"ping_interval,omitempty"` + // ping only on when idle on both directions. 
+ // ping_interval must be non-zero when this is true. + PingWhenIdle bool `protobuf:"varint,9,opt,name=ping_when_idle,json=pingWhenIdle,proto3" json:"ping_when_idle,omitempty"` +} + +func (x *WebSocketServer) Reset() { + *x = WebSocketServer{} + if protoimpl.UnsafeEnabled { + mi := &file_cilium_api_websocket_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WebSocketServer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WebSocketServer) ProtoMessage() {} + +func (x *WebSocketServer) ProtoReflect() protoreflect.Message { + mi := &file_cilium_api_websocket_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WebSocketServer.ProtoReflect.Descriptor instead. +func (*WebSocketServer) Descriptor() ([]byte, []int) { + return file_cilium_api_websocket_proto_rawDescGZIP(), []int{1} +} + +func (x *WebSocketServer) GetAccessLogPath() string { + if x != nil { + return x.AccessLogPath + } + return "" +} + +func (x *WebSocketServer) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *WebSocketServer) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *WebSocketServer) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *WebSocketServer) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *WebSocketServer) GetOrigin() string { + if x != nil { + return x.Origin + } + return "" +} + +func (x *WebSocketServer) GetHandshakeTimeout() *durationpb.Duration { + if x != nil { + return x.HandshakeTimeout + } + return nil +} + +func (x *WebSocketServer) GetPingInterval() *durationpb.Duration { + if x != nil { + return x.PingInterval + } + return nil +} + +func (x *WebSocketServer) GetPingWhenIdle() bool { + if x != nil { + return x.PingWhenIdle + } + return false +} + +var File_cilium_api_websocket_proto protoreflect.FileDescriptor + +var file_cilium_api_websocket_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x65, 0x62, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x69, + 0x6c, 0x69, 0x75, 0x6d, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdc, 0x02, + 0x0a, 0x0f, 0x57, 0x65, 0x62, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1b, 0x0a, 0x04, 0x68, 0x6f, 0x73, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x02, + 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 
0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x46, + 0x0a, 0x11, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x77, + 0x68, 0x65, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x70, 0x69, 0x6e, 0x67, 0x57, 0x68, 0x65, 0x6e, 0x49, 0x64, 0x6c, 0x65, 0x22, 0xd3, 0x02, 0x0a, + 0x0f, 0x57, 0x65, 0x62, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x12, 0x26, 0x0a, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, + 0x69, 0x67, 0x69, 0x6e, 0x12, 0x46, 0x0a, 0x11, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x68, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0d, + 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x70, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, 0x0a, 0x0e, + 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x68, 0x65, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x57, 0x68, 0x65, 0x6e, 0x49, 0x64, + 0x6c, 0x65, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 
0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69, + 0x75, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cilium_api_websocket_proto_rawDescOnce sync.Once + file_cilium_api_websocket_proto_rawDescData = file_cilium_api_websocket_proto_rawDesc +) + +func file_cilium_api_websocket_proto_rawDescGZIP() []byte { + file_cilium_api_websocket_proto_rawDescOnce.Do(func() { + file_cilium_api_websocket_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_websocket_proto_rawDescData) + }) + return file_cilium_api_websocket_proto_rawDescData +} + +var file_cilium_api_websocket_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_cilium_api_websocket_proto_goTypes = []interface{}{ + (*WebSocketClient)(nil), // 0: cilium.WebSocketClient + (*WebSocketServer)(nil), // 1: cilium.WebSocketServer + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration +} +var file_cilium_api_websocket_proto_depIdxs = []int32{ + 2, // 0: cilium.WebSocketClient.handshake_timeout:type_name -> google.protobuf.Duration + 2, // 1: cilium.WebSocketClient.ping_interval:type_name -> google.protobuf.Duration + 2, // 2: cilium.WebSocketServer.handshake_timeout:type_name -> google.protobuf.Duration + 2, // 3: cilium.WebSocketServer.ping_interval:type_name -> google.protobuf.Duration + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_cilium_api_websocket_proto_init() } +func file_cilium_api_websocket_proto_init() { + if File_cilium_api_websocket_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cilium_api_websocket_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WebSocketClient); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cilium_api_websocket_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WebSocketServer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cilium_api_websocket_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cilium_api_websocket_proto_goTypes, + DependencyIndexes: file_cilium_api_websocket_proto_depIdxs, + MessageInfos: file_cilium_api_websocket_proto_msgTypes, + }.Build() + File_cilium_api_websocket_proto = out.File + file_cilium_api_websocket_proto_rawDesc = nil + file_cilium_api_websocket_proto_goTypes = nil + file_cilium_api_websocket_proto_depIdxs = nil +} diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.validate.go new file mode 100644 index 0000000000..9f7a8b25ce --- /dev/null +++ b/vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.validate.go @@ -0,0 +1,389 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: cilium/api/websocket.proto + +package cilium + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on WebSocketClient with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *WebSocketClient) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on WebSocketClient with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// WebSocketClientMultiError, or nil if none found. +func (m *WebSocketClient) ValidateAll() error { + return m.validate(true) +} + +func (m *WebSocketClient) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for AccessLogPath + + if utf8.RuneCountInString(m.GetHost()) < 2 { + err := WebSocketClientValidationError{ + field: "Host", + reason: "value length must be at least 2 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Path + + // no validation rules for Key + + // no validation rules for Version + + // no validation rules for Origin + + if all { + switch v := interface{}(m.GetHandshakeTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WebSocketClientValidationError{ + field: "HandshakeTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WebSocketClientValidationError{ + field: "HandshakeTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHandshakeTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WebSocketClientValidationError{ + field: "HandshakeTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPingInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WebSocketClientValidationError{ + field: "PingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WebSocketClientValidationError{ + field: "PingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPingInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WebSocketClientValidationError{ + field: "PingInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for PingWhenIdle + + if len(errors) > 0 { + return WebSocketClientMultiError(errors) + } + + return nil +} + +// WebSocketClientMultiError is an error 
wrapping multiple validation errors +// returned by WebSocketClient.ValidateAll() if the designated constraints +// aren't met. +type WebSocketClientMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WebSocketClientMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WebSocketClientMultiError) AllErrors() []error { return m } + +// WebSocketClientValidationError is the validation error returned by +// WebSocketClient.Validate if the designated constraints aren't met. +type WebSocketClientValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WebSocketClientValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WebSocketClientValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WebSocketClientValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WebSocketClientValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e WebSocketClientValidationError) ErrorName() string { return "WebSocketClientValidationError" } + +// Error satisfies the builtin error interface +func (e WebSocketClientValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWebSocketClient.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WebSocketClientValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WebSocketClientValidationError{} + +// Validate checks the field values on WebSocketServer with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *WebSocketServer) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on WebSocketServer with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// WebSocketServerMultiError, or nil if none found. 
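Of the two messages, only WebSocketClient constrains its scalar fields: the client validate method above rejects a host shorter than 2 runes, and both messages recurse into the embedded HandshakeTimeout and PingInterval durations if those implement their own validation. A passing client config might look like this (hypothetical values; durationpb is the standard well-known-types package):

package main

import (
	"fmt"
	"time"

	cilium "github.com/cilium/proxy/go/cilium/api"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	ws := &cilium.WebSocketClient{
		Host:             "example.com", // must be at least 2 runes
		HandshakeTimeout: durationpb.New(5 * time.Second),
		PingInterval:     durationpb.New(30 * time.Second),
		PingWhenIdle:     true, // meaningful only with a non-zero ping interval
	}
	if err := ws.ValidateAll(); err != nil {
		fmt.Println(err)
	}
}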
+func (m *WebSocketServer) ValidateAll() error { + return m.validate(true) +} + +func (m *WebSocketServer) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for AccessLogPath + + // no validation rules for Host + + // no validation rules for Path + + // no validation rules for Key + + // no validation rules for Version + + // no validation rules for Origin + + if all { + switch v := interface{}(m.GetHandshakeTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WebSocketServerValidationError{ + field: "HandshakeTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WebSocketServerValidationError{ + field: "HandshakeTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHandshakeTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WebSocketServerValidationError{ + field: "HandshakeTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPingInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WebSocketServerValidationError{ + field: "PingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WebSocketServerValidationError{ + field: "PingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPingInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WebSocketServerValidationError{ + field: "PingInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for PingWhenIdle + + if len(errors) > 0 { + return WebSocketServerMultiError(errors) + } + + return nil +} + +// WebSocketServerMultiError is an error wrapping multiple validation errors +// returned by WebSocketServer.ValidateAll() if the designated constraints +// aren't met. +type WebSocketServerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WebSocketServerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WebSocketServerMultiError) AllErrors() []error { return m } + +// WebSocketServerValidationError is the validation error returned by +// WebSocketServer.Validate if the designated constraints aren't met. +type WebSocketServerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WebSocketServerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WebSocketServerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WebSocketServerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WebSocketServerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e WebSocketServerValidationError) ErrorName() string { return "WebSocketServerValidationError" } + +// Error satisfies the builtin error interface +func (e WebSocketServerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWebSocketServer.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WebSocketServerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WebSocketServerValidationError{} diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE new file mode 100644 index 0000000000..23a0ada2fb --- /dev/null +++ b/vendor/github.com/coreos/go-semver/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 0000000000..eb9fb7ff2d --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,296 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Semantic Versions http://semver.org +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +type Version struct { + Major int64 + Minor int64 + Patch int64 + PreRelease PreRelease + Metadata string +} + +type PreRelease string + +func splitOff(input *string, delim string) (val string) { + parts := strings.SplitN(*input, delim, 2) + + if len(parts) == 2 { + *input = parts[0] + val = parts[1] + } + + return val +} + +func New(version string) *Version { + return Must(NewVersion(version)) +} + +func NewVersion(version string) (*Version, error) { + v := Version{} + + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. 
Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) + } + + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } + + parsed := make([]int64, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. +func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. 
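+	// For example, 1.0.0-alpha sorts before 1.0.0-alpha.1: the shared "alpha"
+	// identifier compares equal, and the version carrying the extra
+	// pre-release field takes the higher precedence (an illustrative
+	// consequence of the rule above).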
+ if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Numeric identifiers always have lower precedence than non-numeric identifiers. + if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 0000000000..e256b41a5d --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
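+
+// Versions (declared below) implements sort.Interface over a []*Version,
+// ordering by semver precedence. An illustrative use from another package:
+//
+//	vs := []*semver.Version{semver.New("1.0.0"), semver.New("0.9.0")}
+//	semver.Sort(vs) // vs now holds 0.9.0, 1.0.0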
+ +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/coreos/go-systemd/v22/LICENSE b/vendor/github.com/coreos/go-systemd/v22/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/v22/NOTICE b/vendor/github.com/coreos/go-systemd/v22/NOTICE new file mode 100644 index 0000000000..23a0ada2fb --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go new file mode 100644 index 0000000000..ac24c7767d --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go @@ -0,0 +1,46 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal C API. +// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "fmt" +) + +// Priority of a journal message +type Priority int + +const ( + PriEmerg Priority = iota + PriAlert + PriCrit + PriErr + PriWarning + PriNotice + PriInfo + PriDebug +) + +// Print prints a message to the local systemd journal using Send().
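+// For example (arguments here are illustrative):
+//
+//	journal.Print(journal.PriInfo, "service %s started", "myservice")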
+func Print(priority Priority, format string, a ...interface{}) error { + return Send(fmt.Sprintf(format, a...), priority, nil) +} diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go new file mode 100644 index 0000000000..c5b23a8196 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go @@ -0,0 +1,267 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal C API. +// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "unsafe" +) + +var ( + // This can be overridden at build-time: + // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable + journalSocket = "/run/systemd/journal/socket" + + // unixConnPtr atomically holds the local unconnected Unix-domain socket. + // Concrete safe pointer type: *net.UnixConn + unixConnPtr unsafe.Pointer + // onceConn ensures that unixConnPtr is initialized exactly once. + onceConn sync.Once +) + +// Enabled checks whether the local systemd journal is available for logging. +func Enabled() bool { + if c := getOrInitConn(); c == nil { + return false + } + + conn, err := net.Dial("unixgram", journalSocket) + if err != nil { + return false + } + defer conn.Close() + + return true +} + +// StderrIsJournalStream returns whether the process stderr is connected +// to the Journal's stream transport. +// +// This can be used for automatic protocol upgrading described in [Journal Native Protocol]. +// +// Returns true if the JOURNAL_STREAM environment variable is present, +// and stderr's device and inode numbers match it. +// +// An error is returned if an unexpected error occurs: e.g. if the JOURNAL_STREAM +// environment variable is present but malformed, the fstat syscall fails, etc. +// +// [Journal Native Protocol]: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/#automatic-protocol-upgrading +func StderrIsJournalStream() (bool, error) { + return fdIsJournalStream(syscall.Stderr) +} + +// StdoutIsJournalStream returns whether the process stdout is connected +// to the Journal's stream transport. +// +// Returns true if the JOURNAL_STREAM environment variable is present, +// and stdout's device and inode numbers match it. +// +// An error is returned if an unexpected error occurs: e.g. if the JOURNAL_STREAM +// environment variable is present but malformed, the fstat syscall fails, etc. +// +// Most users should probably use [StderrIsJournalStream].
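+//
+// The JOURNAL_STREAM value has the form "<device>:<inode>"; a
+// journald-spawned process might, for instance, see JOURNAL_STREAM=9:12345
+// (illustrative numbers), which is then compared against an fstat of the
+// file descriptor.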
+func StdoutIsJournalStream() (bool, error) { + return fdIsJournalStream(syscall.Stdout) +} + +func fdIsJournalStream(fd int) (bool, error) { + journalStream := os.Getenv("JOURNAL_STREAM") + if journalStream == "" { + return false, nil + } + + var expectedStat syscall.Stat_t + _, err := fmt.Sscanf(journalStream, "%d:%d", &expectedStat.Dev, &expectedStat.Ino) + if err != nil { + return false, fmt.Errorf("failed to parse JOURNAL_STREAM=%q: %v", journalStream, err) + } + + var stat syscall.Stat_t + err = syscall.Fstat(fd, &stat) + if err != nil { + return false, err + } + + match := stat.Dev == expectedStat.Dev && stat.Ino == expectedStat.Ino + return match, nil +} + +// Send a message to the local systemd journal. vars is a map of journald +// fields to values. Fields must be composed of uppercase letters, numbers, +// and underscores, but must not start with an underscore. Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil. +func Send(message string, priority Priority, vars map[string]string) error { + conn := getOrInitConn() + if conn == nil { + return errors.New("could not initialize socket to journald") + } + + socketAddr := &net.UnixAddr{ + Name: journalSocket, + Net: "unixgram", + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) + if err == nil { + return nil + } + if !isSocketSpaceError(err) { + return err + } + + // Large log entry, send it via tempfile and ancillary-fd. + file, err := tempFd() + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return err + } + rights := syscall.UnixRights(int(file.Fd())) + _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) + if err != nil { + return err + } + + return nil +} + +// getOrInitConn attempts to get the global `unixConnPtr` socket, initializing if necessary +func getOrInitConn() *net.UnixConn { + conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + if conn != nil { + return conn + } + onceConn.Do(initConn) + return (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) +} + +func appendVariable(w io.Writer, name, value string) { + if err := validVarName(name); err != nil { + fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +// validVarName validates a variable name to make sure journald will accept it. 
+// The variable name must be in uppercase and consist only of characters, +// numbers and underscores, and may not begin with an underscore: +// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html +func validVarName(name string) error { + if name == "" { + return errors.New("Empty variable name") + } else if name[0] == '_' { + return errors.New("Variable name begins with an underscore") + } + + for _, c := range name { + if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { + return errors.New("Variable name contains invalid characters") + } + } + return nil +} + +// isSocketSpaceError checks whether the error is signaling +// an "overlarge message" condition. +func isSocketSpaceError(err error) bool { + opErr, ok := err.(*net.OpError) + if !ok || opErr == nil { + return false + } + + sysErr, ok := opErr.Err.(*os.SyscallError) + if !ok || sysErr == nil { + return false + } + + return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS +} + +// tempFd creates a temporary, unlinked file under `/dev/shm`. +func tempFd() (*os.File, error) { + file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + if err != nil { + return nil, err + } + err = syscall.Unlink(file.Name()) + if err != nil { + return nil, err + } + return file, nil +} + +// initConn initializes the global `unixConnPtr` socket. +// It is automatically called when needed. +func initConn() { + autobind, err := net.ResolveUnixAddr("unixgram", "") + if err != nil { + return + } + + sock, err := net.ListenUnixgram("unixgram", autobind) + if err != nil { + return + } + + atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) +} diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go new file mode 100644 index 0000000000..322e41e74c --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go @@ -0,0 +1,43 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal C API.
+// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "errors" +) + +func Enabled() bool { + return false +} + +func Send(message string, priority Priority, vars map[string]string) error { + return errors.New("could not initialize socket to journald") +} + +func StderrIsJournalStream() (bool, error) { + return false, nil +} + +func StdoutIsJournalStream() (bool, error) { + return false, nil +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 0000000000..0b4659b731 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 0000000000..081c86fa8e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility. + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, it works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128. + - customname (beta), changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview.
+ +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. +You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. + + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +This is useful when you have a protocol buffer message with a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. +This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available. + +Gogoprotobuf also has some more subtle changes; these could be changed back: + + - the generated package names for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
+ - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful if you would rather use enum_stringer, or write your own string method. + - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather use face + - goproto_stringer, if false, the message is generated without the default string method, this is useful if you would rather use stringer, or write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). + +Less Typing and Peace of Mind are explained in their specific plugin folders' godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extensions the code that is generated +will be the same as if goprotobuf had generated it. + +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a separate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 0000000000..1e91766aee --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + 
ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: 
"gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + 
ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + 
ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + 
proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 
0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 
0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 0000000000..f6502e4b90 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 0000000000..b80c85653f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
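+
+// Illustrative only, mirroring the example in the gogoproto package
+// documentation: a consumer's .proto file applies the options defined here
+// by importing this file and annotating its fields, e.g.
+//
+//	import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+//
+//	message A {
+//		optional string Description = 1 [(gogoproto.nullable) = false];
+//		optional int64 Number = 2 [(gogoproto.nullable) = false];
+//		optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
+//	}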
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git 
a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 0000000000..390d4e4be6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return 
proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { 
+ v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message 
*google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func 
IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 0000000000..3496dc99d5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 0000000000..a85bf1984c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. 
+// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? +func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 0000000000..18b2a3318a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. 
+ FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: 
"LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m 
!= nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this option is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map<KeyType, ValueType> map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above.
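+ // Illustrative sketch (not generated output): every *Options type in this
+ // file shares the same nil-safe getter pattern, so a defaulted field can be
+ // read without a nil check:
+ //
+ //    var mo *MessageOptions                // a nil receiver is fine
+ //    _ = mo.GetMessageSetWireFormat()      // default: false
+ //    _ = (&FileOptions{}).GetOptimizeFor() // unset field: FileOptions_SPEED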
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
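+ // Illustrative sketch (not generated output): the def= values in the struct
+ // tags above surface through the getters when a field carries no options:
+ //
+ //    fo := &FieldOptions{}
+ //    _ = fo.GetCtype()  // FieldOptions_STRING
+ //    _ = fo.GetJstype() // FieldOptions_JS_NORMAL
+ //    _ = fo.GetLazy()   // false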
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
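+ // Illustrative sketch (not generated output): the extension range
+ // {Start: 1000, End: 536870911} declared for every *Options type mirrors
+ // "extensions 1000 to max;" in descriptor.proto; 536870911 is 2^29-1, the
+ // largest legal protobuf field number, so a custom option may claim any tag
+ // from 1000 upward. A hypothetical extension in .proto source:
+ //
+ //    extend google.protobuf.OneofOptions {
+ //      optional string my_oneof_note = 50001; // hypothetical name and tag
+ //    }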
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
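+ // Illustrative sketch (not generated output): allow_alias lets two enum
+ // names share one tag in .proto source, e.g. a hypothetical enum:
+ //
+ //    enum Status {
+ //      option allow_alias = true;
+ //      RUNNING = 1;
+ //      STARTED = 1; // alias of RUNNING
+ //    }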
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
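+ // Illustrative sketch (not generated output): optional scalars in generated
+ // gogo/protobuf structs are pointers, so hand-built options go through the
+ // proto helper constructors:
+ //
+ //    opts := &ServiceOptions{Deprecated: proto.Bool(true)}
+ //    _ = opts.GetDeprecated() // true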
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
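+ // Illustrative sketch (not generated output): both fields above fall back to
+ // their declared defaults when unset:
+ //
+ //    var mo *MethodOptions
+ //    _ = mo.GetIdempotencyLevel() // MethodOptions_IDEMPOTENCY_UNKNOWN
+ //    _ = mo.GetDeprecated()       // false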
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
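+ // Illustrative sketch (not generated output): which field the parser fills
+ // depends on the token kind of the value; for a hypothetical option (my_opt):
+ //
+ //    option (my_opt) = FOO;      -> IdentifierValue = "FOO"
+ //    option (my_opt) = 42;       -> PositiveIntValue = 42
+ //    option (my_opt) = -42;      -> NegativeIntValue = -42
+ //    option (my_opt) = 4.2;      -> DoubleValue = 4.2
+ //    option (my_opt) = "bar";    -> StringValue = []byte("bar")
+ //    option (my_opt) = { a: 1 }; -> AggregateValue holds the text form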
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
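+ // Illustrative sketch (not generated output): reassembling the dotted form
+ // from the parts, wrapping extension segments in parentheses (assumes the
+ // standard fmt and strings imports):
+ //
+ //    var b strings.Builder
+ //    for i, p := range opt.GetName() {
+ //        if i > 0 {
+ //            b.WriteByte('.')
+ //        }
+ //        if p.GetIsExtension() {
+ //            fmt.Fprintf(&b, "(%s)", p.GetNamePart())
+ //        } else {
+ //            b.WriteString(p.GetNamePart())
+ //        }
+ //    }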
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
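+ // Illustrative sketch (not generated output): decoding a span, assuming the
+ // four-element form:
+ //
+ //    span := loc.GetSpan() // e.g. [4, 2, 6, 10]
+ //    startLine, startCol := span[0]+1, span[1]+1 // 1-based: line 5, col 3
+ //    endLine, endCol := span[2]+1, span[3]+1     // line 7, col 11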
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
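+ // Illustrative sketch (not generated output): because End is one past the
+ // last relevant byte, a half-open slice over the generated file's bytes
+ // (generatedSrc here is hypothetical) recovers exactly the annotated text:
+ //
+ //    snippet := generatedSrc[ann.GetBegin():ann.GetEnd()] // len == End-Begin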
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 0000000000..165b2110df --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
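+// NOTE: the GoString() methods in this file implement fmt.GoStringer, so
+// values of the descriptor types below render as Go source when formatted
+// with the %#v verb — useful when debugging code emitted by protoc-gen-gogo.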
+// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if 
this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, 
"Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + 
s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if 
this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name 
!= nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 0000000000..e0846a357d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
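+
+// helper.go adds convenience methods on the generated descriptor types:
+// map-entry field access, protobuf wire-type and tag-key computation, and
+// lookups of messages, fields, enums, and extensions across a
+// FileDescriptorSet. A field's tag key is the varint encoding of
+// (field_number << 3) | wire_type; for example, field 1 with wire type 2
+// (length-delimited) encodes as the single byte 0x0a.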
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range 
desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/hashicorp/consul/api/.copywrite.hcl 
b/vendor/github.com/hashicorp/consul/api/.copywrite.hcl new file mode 100644 index 0000000000..34d99ba25e --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/.copywrite.hcl @@ -0,0 +1,8 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2023 + + header_ignore = [] +} diff --git a/vendor/github.com/hashicorp/consul/api/LICENSE b/vendor/github.com/hashicorp/consul/api/LICENSE new file mode 100644 index 0000000000..7c5baa45e1 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/LICENSE @@ -0,0 +1,365 @@ +Copyright (c) 2020 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md new file mode 100644 index 0000000000..96a867f279 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/README.md @@ -0,0 +1,77 @@ +# Consul API Client + +This package provides the `api` package which provides programmatic access to the full Consul API. + +The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api). + +## Usage + +Below is an example of using the Consul client. To run the example, you must first +[install Consul](https://developer.hashicorp.com/consul/downloads) and +[Go](https://go.dev/doc/install). + +To run the client API, create a new Go module. + +```shell +go mod init consul-demo +``` + +Copy the example code into a file called `main.go` in the directory where the module is defined. +As seen in the example, the Consul API is often imported with the alias `capi`. 
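+(A note on configuration: `capi.DefaultConfig()` honors the standard Consul
+environment variables such as `CONSUL_HTTP_ADDR` and otherwise targets a
+local agent at `127.0.0.1:8500`, which matches the dev agent started below.)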
+ +```go +package main + +import ( + "fmt" + + capi "github.com/hashicorp/consul/api" +) + +func main() { + // Get a new client + client, err := capi.NewClient(capi.DefaultConfig()) + if err != nil { + panic(err) + } + + // Get a handle to the KV API + kv := client.KV() + + // PUT a new KV pair + p := &capi.KVPair{Key: "REDIS_MAXCLIENTS", Value: []byte("1000")} + _, err = kv.Put(p, nil) + if err != nil { + panic(err) + } + + // Lookup the pair + pair, _, err := kv.Get("REDIS_MAXCLIENTS", nil) + if err != nil { + panic(err) + } + fmt.Printf("KV: %v %s\n", pair.Key, pair.Value) +} +``` + +Install the Consul API dependency with `go mod tidy`. + +In a separate terminal window, start a local Consul server. + +```shell +consul agent -dev -node machine +``` + +Run the example. + +```shell +go run . +``` + +You should get the following result printed to the terminal. + +```shell +KV: REDIS_MAXCLIENTS 1000 +``` + +After running the code, you can also view the values in the Consul UI on your local machine at http://localhost:8500/ui/dc1/kv diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go new file mode 100644 index 0000000000..47b38eb6ca --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -0,0 +1,1738 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "encoding/json" + "fmt" + "io" + "net/url" + "time" + + "github.com/mitchellh/mapstructure" +) + +const ( + // ACLClientType is the client type token + ACLClientType = "client" + + // ACLManagementType is the management type token + ACLManagementType = "management" + + // ACLTemplatedPolicy names + ACLTemplatedPolicyServiceName = "builtin/service" + ACLTemplatedPolicyNodeName = "builtin/node" + ACLTemplatedPolicyDNSName = "builtin/dns" + ACLTemplatedPolicyNomadServerName = "builtin/nomad-server" + ACLTemplatedPolicyWorkloadIdentityName = "builtin/workload-identity" +) + +type ACLLink struct { + ID string + Name string +} + +type ACLTokenPolicyLink = ACLLink +type ACLTokenRoleLink = ACLLink + +// ACLToken represents an ACL Token +type ACLToken struct { + CreateIndex uint64 + ModifyIndex uint64 + AccessorID string + SecretID string + Description string + Policies []*ACLTokenPolicyLink `json:",omitempty"` + Roles []*ACLTokenRoleLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + NodeIdentities []*ACLNodeIdentity `json:",omitempty"` + TemplatedPolicies []*ACLTemplatedPolicy `json:",omitempty"` + Local bool + AuthMethod string `json:",omitempty"` + ExpirationTTL time.Duration `json:",omitempty"` + ExpirationTime *time.Time `json:",omitempty"` + CreateTime time.Time `json:",omitempty"` + Hash []byte `json:",omitempty"` + + // DEPRECATED (ACL-Legacy-Compat) + // Rules are an artifact of legacy tokens deprecated in Consul 1.4 + Rules string `json:"-"` + + // Namespace is the namespace the ACLToken is associated with. + // Namespaces are a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // Partition is the partition the ACLToken is associated with. + // Partitions are a Consul Enterprise feature. + Partition string `json:",omitempty"` + + // AuthMethodNamespace is the namespace the token's AuthMethod is associated with. + // Namespacing is a Consul Enterprise feature. 
+ AuthMethodNamespace string `json:",omitempty"` +} + +type ACLTokenExpanded struct { + ExpandedPolicies []ACLPolicy + ExpandedRoles []ACLRole + + NamespaceDefaultPolicyIDs []string + NamespaceDefaultRoleIDs []string + + AgentACLDefaultPolicy string + AgentACLDownPolicy string + ResolvedByAgent string + + ACLToken +} + +type ACLTokenListEntry struct { + CreateIndex uint64 + ModifyIndex uint64 + AccessorID string + SecretID string + Description string + Policies []*ACLTokenPolicyLink `json:",omitempty"` + Roles []*ACLTokenRoleLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + NodeIdentities []*ACLNodeIdentity `json:",omitempty"` + TemplatedPolicies []*ACLTemplatedPolicy `json:",omitempty"` + Local bool + AuthMethod string `json:",omitempty"` + ExpirationTime *time.Time `json:",omitempty"` + CreateTime time.Time + Hash []byte + Legacy bool `json:"-"` // DEPRECATED + + // Namespace is the namespace the ACLTokenListEntry is associated with. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // Partition is the partition the ACLTokenListEntry is associated with. + // Partitions are a Consul Enterprise feature. + Partition string `json:",omitempty"` + + // AuthMethodNamespace is the namespace the token's AuthMethod is associated with. + // Namespacing is a Consul Enterprise feature. + AuthMethodNamespace string `json:",omitempty"` +} + +// ACLEntry is used to represent a legacy ACL token +// The legacy tokens are deprecated. +type ACLEntry struct { + CreateIndex uint64 + ModifyIndex uint64 + ID string + Name string + Type string + Rules string +} + +// ACLReplicationStatus is used to represent the status of ACL replication. +type ACLReplicationStatus struct { + Enabled bool + Running bool + SourceDatacenter string + ReplicationType string + ReplicatedIndex uint64 + ReplicatedRoleIndex uint64 + ReplicatedTokenIndex uint64 + LastSuccess time.Time + LastError time.Time + LastErrorMessage string +} + +// ACLServiceIdentity represents a high-level grant of all necessary privileges +// to assume the identity of the named Service in the Catalog and within +// Connect. +type ACLServiceIdentity struct { + ServiceName string + Datacenters []string `json:",omitempty"` +} + +// ACLNodeIdentity represents a high-level grant of all necessary privileges +// to assume the identity of the named Node in the Catalog and within Connect. +type ACLNodeIdentity struct { + NodeName string + Datacenter string +} + +// ACLTemplatedPolicy represents a template used to generate a `synthetic` policy +// given some input variables. +type ACLTemplatedPolicy struct { + TemplateName string + TemplateVariables *ACLTemplatedPolicyVariables `json:",omitempty"` + + // Datacenters are an artifact of Nodeidentity & ServiceIdentity. + // It is used to facilitate the future migration away from both + Datacenters []string `json:",omitempty"` +} + +type ACLTemplatedPolicyResponse struct { + TemplateName string + Schema string + Template string +} + +type ACLTemplatedPolicyVariables struct { + Name string +} + +// ACLPolicy represents an ACL Policy. +type ACLPolicy struct { + ID string + Name string + Description string + Rules string + Datacenters []string + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 + + // Namespace is the namespace the ACLPolicy is associated with. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // Partition is the partition the ACLPolicy is associated with. 
+ // Partitions are a Consul Enterprise feature. + Partition string `json:",omitempty"` +} + +type ACLPolicyListEntry struct { + ID string + Name string + Description string + Datacenters []string + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 + + // Namespace is the namespace the ACLPolicyListEntry is associated with. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // Partition is the partition the ACLPolicyListEntry is associated with. + // Partitions are a Consul Enterprise feature. + Partition string `json:",omitempty"` +} + +type ACLRolePolicyLink = ACLLink + +// ACLRole represents an ACL Role. +type ACLRole struct { + ID string + Name string + Description string + Policies []*ACLRolePolicyLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + NodeIdentities []*ACLNodeIdentity `json:",omitempty"` + TemplatedPolicies []*ACLTemplatedPolicy `json:",omitempty"` + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 + + // Namespace is the namespace the ACLRole is associated with. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // Partition is the partition the ACLRole is associated with. + // Partitions are a Consul Enterprise feature. + Partition string `json:",omitempty"` +} + +// BindingRuleBindType is the type of binding rule mechanism used. +type BindingRuleBindType string + +const ( + // BindingRuleBindTypeService binds to a service identity with the given name. + BindingRuleBindTypeService BindingRuleBindType = "service" + + // BindingRuleBindTypeRole binds to pre-existing roles with the given name. + BindingRuleBindTypeRole BindingRuleBindType = "role" + + // BindingRuleBindTypeNode binds to a node identity with given name. + BindingRuleBindTypeNode BindingRuleBindType = "node" + + // BindingRuleBindTypeTemplatedPolicy binds to a templated policy with given template name and variables. + BindingRuleBindTypeTemplatedPolicy BindingRuleBindType = "templated-policy" +) + +type ACLBindingRule struct { + ID string + Description string + AuthMethod string + Selector string + BindType BindingRuleBindType + BindName string + BindVars *ACLTemplatedPolicyVariables `json:",omitempty"` + + CreateIndex uint64 + ModifyIndex uint64 + + // Namespace is the namespace the ACLBindingRule is associated with. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // Partition is the partition the ACLBindingRule is associated with. + // Partitions are a Consul Enterprise feature. + Partition string `json:",omitempty"` +} + +type ACLAuthMethod struct { + Name string + Type string + DisplayName string `json:",omitempty"` + Description string `json:",omitempty"` + MaxTokenTTL time.Duration `json:",omitempty"` + + // TokenLocality defines the kind of token that this auth method produces. + // This can be either 'local' or 'global'. If empty 'local' is assumed. + TokenLocality string `json:",omitempty"` + + // Configuration is arbitrary configuration for the auth method. This + // should only contain primitive values and containers (such as lists and + // maps). + Config map[string]interface{} + + CreateIndex uint64 + ModifyIndex uint64 + + // NamespaceRules apply only on auth methods defined in the default namespace. + // Namespacing is a Consul Enterprise feature. + NamespaceRules []*ACLAuthMethodNamespaceRule `json:",omitempty"` + + // Namespace is the namespace the ACLAuthMethod is associated with. 
+ // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // Partition is the partition the ACLAuthMethod is associated with. + // Partitions are a Consul Enterprise feature. + Partition string `json:",omitempty"` +} + +type ACLTokenFilterOptions struct { + AuthMethod string `json:",omitempty"` + Policy string `json:",omitempty"` + Role string `json:",omitempty"` + ServiceName string `json:",omitempty"` +} + +func (m *ACLAuthMethod) MarshalJSON() ([]byte, error) { + type Alias ACLAuthMethod + exported := &struct { + MaxTokenTTL string `json:",omitempty"` + *Alias + }{ + MaxTokenTTL: m.MaxTokenTTL.String(), + Alias: (*Alias)(m), + } + if m.MaxTokenTTL == 0 { + exported.MaxTokenTTL = "" + } + + return json.Marshal(exported) +} + +func (m *ACLAuthMethod) UnmarshalJSON(data []byte) error { + type Alias ACLAuthMethod + aux := &struct { + MaxTokenTTL string + *Alias + }{ + Alias: (*Alias)(m), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + var err error + if aux.MaxTokenTTL != "" { + if m.MaxTokenTTL, err = time.ParseDuration(aux.MaxTokenTTL); err != nil { + return err + } + } + + return nil +} + +type ACLAuthMethodNamespaceRule struct { + // Selector is an expression that matches against verified identity + // attributes returned from the auth method during login. + Selector string `json:",omitempty"` + + // BindNamespace is the target namespace of the binding. Can be lightly + // templated using HIL ${foo} syntax from available field names. + // + // If empty it's created in the same namespace as the auth method. + BindNamespace string `json:",omitempty"` +} + +type ACLAuthMethodListEntry struct { + Name string + Type string + DisplayName string `json:",omitempty"` + Description string `json:",omitempty"` + MaxTokenTTL time.Duration `json:",omitempty"` + + // TokenLocality defines the kind of token that this auth method produces. + // This can be either 'local' or 'global'. If empty 'local' is assumed. + TokenLocality string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 + + // Namespace is the namespace the ACLAuthMethodListEntry is associated with. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // Partition is the partition the ACLAuthMethodListEntry is associated with. + // Partitions are a Consul Enterprise feature. + Partition string `json:",omitempty"` +} + +// This is nearly identical to the ACLAuthMethod MarshalJSON +func (m *ACLAuthMethodListEntry) MarshalJSON() ([]byte, error) { + type Alias ACLAuthMethodListEntry + exported := &struct { + MaxTokenTTL string `json:",omitempty"` + *Alias + }{ + MaxTokenTTL: m.MaxTokenTTL.String(), + Alias: (*Alias)(m), + } + if m.MaxTokenTTL == 0 { + exported.MaxTokenTTL = "" + } + + return json.Marshal(exported) +} + +// This is nearly identical to the ACLAuthMethod UnmarshalJSON +func (m *ACLAuthMethodListEntry) UnmarshalJSON(data []byte) error { + type Alias ACLAuthMethodListEntry + aux := &struct { + MaxTokenTTL string + *Alias + }{ + Alias: (*Alias)(m), + } + + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + var err error + if aux.MaxTokenTTL != "" { + if m.MaxTokenTTL, err = time.ParseDuration(aux.MaxTokenTTL); err != nil { + return err + } + } + + return nil +} + +// ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed +// KubernetesAuthMethodConfig. 
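+//
+// A minimal illustrative sketch (the keys mirror the fields of
+// KubernetesAuthMethodConfig below; the values here are placeholders):
+//
+//	raw := map[string]interface{}{
+//		"Host":              "https://kubernetes.default.svc",
+//		"CACert":            "<PEM-encoded CA certificate>",
+//		"ServiceAccountJWT": "<service account JWT>",
+//	}
+//	cfg, err := ParseKubernetesAuthMethodConfig(raw)
+//	if err != nil {
+//		// the raw map could not be decoded
+//	}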
+func ParseKubernetesAuthMethodConfig(raw map[string]interface{}) (*KubernetesAuthMethodConfig, error) { + var config KubernetesAuthMethodConfig + decodeConf := &mapstructure.DecoderConfig{ + Result: &config, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + if err := decoder.Decode(raw); err != nil { + return nil, fmt.Errorf("error decoding config: %s", err) + } + + return &config, nil +} + +// KubernetesAuthMethodConfig is the config for the built-in Consul auth method +// for Kubernetes. +type KubernetesAuthMethodConfig struct { + Host string `json:",omitempty"` + CACert string `json:",omitempty"` + ServiceAccountJWT string `json:",omitempty"` +} + +// RenderToConfig converts this into a map[string]interface{} suitable for use +// in the ACLAuthMethod.Config field. +func (c *KubernetesAuthMethodConfig) RenderToConfig() map[string]interface{} { + return map[string]interface{}{ + "Host": c.Host, + "CACert": c.CACert, + "ServiceAccountJWT": c.ServiceAccountJWT, + } +} + +// OIDCAuthMethodConfig is the config for the built-in Consul auth method for +// OIDC and JWT. +type OIDCAuthMethodConfig struct { + // common for type=oidc and type=jwt + JWTSupportedAlgs []string `json:",omitempty"` + BoundAudiences []string `json:",omitempty"` + ClaimMappings map[string]string `json:",omitempty"` + ListClaimMappings map[string]string `json:",omitempty"` + OIDCDiscoveryURL string `json:",omitempty"` + OIDCDiscoveryCACert string `json:",omitempty"` + // just for type=oidc + OIDCClientID string `json:",omitempty"` + OIDCClientSecret string `json:",omitempty"` + OIDCScopes []string `json:",omitempty"` + OIDCACRValues []string `json:",omitempty"` + AllowedRedirectURIs []string `json:",omitempty"` + VerboseOIDCLogging bool `json:",omitempty"` + // just for type=jwt + JWKSURL string `json:",omitempty"` + JWKSCACert string `json:",omitempty"` + JWTValidationPubKeys []string `json:",omitempty"` + BoundIssuer string `json:",omitempty"` + ExpirationLeeway time.Duration `json:",omitempty"` + NotBeforeLeeway time.Duration `json:",omitempty"` + ClockSkewLeeway time.Duration `json:",omitempty"` +} + +// RenderToConfig converts this into a map[string]interface{} suitable for use +// in the ACLAuthMethod.Config field. 
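+//
+// For example, a "jwt" auth method could be assembled like this (a sketch;
+// the method name, issuer, and audience values are placeholders):
+//
+//	cfg := &OIDCAuthMethodConfig{
+//		JWKSURL:        "https://issuer.example.com/jwks",
+//		BoundIssuer:    "https://issuer.example.com",
+//		BoundAudiences: []string{"consul"},
+//	}
+//	method := &ACLAuthMethod{
+//		Name:   "jwt-example",
+//		Type:   "jwt",
+//		Config: cfg.RenderToConfig(),
+//	}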
+func (c *OIDCAuthMethodConfig) RenderToConfig() map[string]interface{} { + return map[string]interface{}{ + // common for type=oidc and type=jwt + "JWTSupportedAlgs": c.JWTSupportedAlgs, + "BoundAudiences": c.BoundAudiences, + "ClaimMappings": c.ClaimMappings, + "ListClaimMappings": c.ListClaimMappings, + "OIDCDiscoveryURL": c.OIDCDiscoveryURL, + "OIDCDiscoveryCACert": c.OIDCDiscoveryCACert, + // just for type=oidc + "OIDCClientID": c.OIDCClientID, + "OIDCClientSecret": c.OIDCClientSecret, + "OIDCScopes": c.OIDCScopes, + "OIDCACRValues": c.OIDCACRValues, + "AllowedRedirectURIs": c.AllowedRedirectURIs, + "VerboseOIDCLogging": c.VerboseOIDCLogging, + // just for type=jwt + "JWKSURL": c.JWKSURL, + "JWKSCACert": c.JWKSCACert, + "JWTValidationPubKeys": c.JWTValidationPubKeys, + "BoundIssuer": c.BoundIssuer, + "ExpirationLeeway": c.ExpirationLeeway, + "NotBeforeLeeway": c.NotBeforeLeeway, + "ClockSkewLeeway": c.ClockSkewLeeway, + } +} + +type ACLLoginParams struct { + AuthMethod string + BearerToken string + Meta map[string]string `json:",omitempty"` +} + +type ACLOIDCAuthURLParams struct { + AuthMethod string + RedirectURI string + ClientNonce string + Meta map[string]string `json:",omitempty"` +} + +// ACL can be used to query the ACL endpoints +type ACL struct { + c *Client +} + +// ACL returns a handle to the ACL endpoints +func (c *Client) ACL() *ACL { + return &ACL{c} +} + +// BootstrapRequest is used for when operators provide an ACL Bootstrap Token +type BootstrapRequest struct { + BootstrapSecret string +} + +// Bootstrap is used to perform a one-time ACL bootstrap operation on a cluster +// to get the first management token. +func (a *ACL) Bootstrap() (*ACLToken, *WriteMeta, error) { + return a.BootstrapWithToken("") +} + +// BootstrapWithToken is used to get the initial bootstrap token or pass in the one that was provided in the API +func (a *ACL) BootstrapWithToken(btoken string) (*ACLToken, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/bootstrap") + if btoken != "" { + r.obj = &BootstrapRequest{ + BootstrapSecret: btoken, + } + } + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, wm, nil +} + +// Create is used to generate a new token with the given parameters +// +// Deprecated: Use TokenCreate instead. +func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/create") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return "", nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return "", nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update is used to update the rules of an existing token +// +// Deprecated: Use TokenUpdate instead. 
+func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/update") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Destroy is used to destroy a given ACL token ID +// +// Deprecated: Use TokenDelete instead. +func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) + r.setWriteOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + if err := requireOK(resp); err != nil { + return nil, err + } + closeResponseBody(resp) + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Clone is used to return a new token cloned from an existing one +// +// Deprecated: Use TokenClone instead. +func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) + r.setWriteOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return "", nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return "", nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Info is used to query for information about an ACL token +// +// Deprecated: Use TokenRead instead. +func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/info/"+id) + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to get all the ACL tokens +// +// Deprecated: Use TokenList instead. +func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/list") + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Replication returns the status of the ACL replication process in the datacenter +func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/replication") + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries *ACLReplicationStatus + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// TokenCreate creates a new ACL token. 
If either the AccessorID or SecretID fields +// of the ACLToken structure are empty they will be filled in by Consul. +func (a *ACL) TokenCreate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/token") + r.setWriteOptions(q) + r.obj = token + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// TokenUpdate updates a token in place without modifying its AccessorID or SecretID. A valid +// AccessorID must be set in the ACLToken structure passed to this function but the SecretID may +// be omitted and will be filled in by Consul with its existing value. +func (a *ACL) TokenUpdate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + if token.AccessorID == "" { + return nil, nil, fmt.Errorf("Must specify an AccessorID for Token Updating") + } + r := a.c.newRequest("PUT", "/v1/acl/token/"+token.AccessorID) + r.setWriteOptions(q) + r.obj = token + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// TokenClone will create a new token with the same policies and locality as the original +// token but will have its own auto-generated AccessorID and SecretID as well having the +// description passed to this function. The accessorID parameter must be a valid Accessor ID +// of an existing token. +func (a *ACL) TokenClone(accessorID string, description string, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + if accessorID == "" { + return nil, nil, fmt.Errorf("Must specify a token AccessorID for Token Cloning") + } + + r := a.c.newRequest("PUT", "/v1/acl/token/"+accessorID+"/clone") + r.setWriteOptions(q) + r.obj = struct{ Description string }{description} + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// TokenDelete removes a single ACL token. The accessorID parameter must be a valid +// Accessor ID of an existing token. +func (a *ACL) TokenDelete(accessorID string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("DELETE", "/v1/acl/token/"+accessorID) + r.setWriteOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + if err := requireOK(resp); err != nil { + return nil, err + } + closeResponseBody(resp) + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// TokenRead retrieves the full token details. The accessorID parameter must be a valid +// Accessor ID of an existing token. 
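+//
+// A minimal sketch of creating a token and reading it back (assumes an *ACL
+// handle named a whose client is configured with a token that has acl:write
+// permissions; error handling abbreviated):
+//
+//	created, _, err := a.TokenCreate(&ACLToken{Description: "example"}, nil)
+//	if err != nil {
+//		return err
+//	}
+//	tok, _, err := a.TokenRead(created.AccessorID, nil)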
+func (a *ACL) TokenRead(accessorID string, q *QueryOptions) (*ACLToken, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/token/"+accessorID) + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// TokenReadExpanded retrieves the full token details, as well as the contents of any policies affecting the token. +// The accessorID parameter must be a valid Accessor ID of an existing token. +func (a *ACL) TokenReadExpanded(accessorID string, q *QueryOptions) (*ACLTokenExpanded, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/token/"+accessorID) + r.setQueryOptions(q) + r.params.Set("expanded", "true") + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out ACLTokenExpanded + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// TokenReadSelf retrieves the full token details of the token currently +// assigned to the API Client. In this manner its possible to read a token +// by its Secret ID. +func (a *ACL) TokenReadSelf(q *QueryOptions) (*ACLToken, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/token/self") + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// TokenList lists all tokens. The listing does not contain any SecretIDs as those +// may only be retrieved by a call to TokenRead. +func (a *ACL) TokenList(q *QueryOptions) ([]*ACLTokenListEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/tokens") + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLTokenListEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// TokenListFiltered lists all tokens that match the given filter options. +// The listing does not contain any SecretIDs as those may only be retrieved by a call to TokenRead. 
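+//
+// For example, to list only the tokens linked to one policy (the policy name
+// is a placeholder):
+//
+//	entries, _, err := a.TokenListFiltered(
+//		ACLTokenFilterOptions{Policy: "example-policy"},
+//		nil,
+//	)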
+func (a *ACL) TokenListFiltered(t ACLTokenFilterOptions, q *QueryOptions) ([]*ACLTokenListEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/tokens") + r.setQueryOptions(q) + + if t.AuthMethod != "" { + r.params.Set("authmethod", t.AuthMethod) + } + if t.Policy != "" { + r.params.Set("policy", t.Policy) + } + if t.Role != "" { + r.params.Set("role", t.Role) + } + if t.ServiceName != "" { + r.params.Set("servicename", t.ServiceName) + } + + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLTokenListEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// PolicyCreate will create a new policy. It is not allowed for the policy parameters +// ID field to be set as this will be generated by Consul while processing the request. +func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) { + if policy.ID != "" { + return nil, nil, fmt.Errorf("Cannot specify an ID in Policy Creation") + } + r := a.c.newRequest("PUT", "/v1/acl/policy") + r.setWriteOptions(q) + r.obj = policy + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLPolicy + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// PolicyUpdate updates a policy. The ID field of the policy parameter must be set to an +// existing policy ID +func (a *ACL) PolicyUpdate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) { + if policy.ID == "" { + return nil, nil, fmt.Errorf("Must specify an ID in Policy Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/policy/"+policy.ID) + r.setWriteOptions(q) + r.obj = policy + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLPolicy + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// PolicyDelete deletes a policy given its ID. +func (a *ACL) PolicyDelete(policyID string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("DELETE", "/v1/acl/policy/"+policyID) + r.setWriteOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// PolicyRead retrieves the policy details including the rule set. 
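+//
+// A minimal sketch of creating a policy and reading it back by ID (the name
+// and rules are placeholders; error handling abbreviated):
+//
+//	p, _, err := a.PolicyCreate(&ACLPolicy{
+//		Name:  "example-policy",
+//		Rules: `key_prefix "example/" { policy = "read" }`,
+//	}, nil)
+//	if err != nil {
+//		return err
+//	}
+//	p, _, err = a.PolicyRead(p.ID, nil)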
+func (a *ACL) PolicyRead(policyID string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/policy/"+policyID) + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out ACLPolicy + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// PolicyReadByName retrieves the policy details including the rule set with name. +func (a *ACL) PolicyReadByName(policyName string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/policy/name/"+url.QueryEscape(policyName)) + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + found, resp, err := requireNotFoundOrOK(resp) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLPolicy + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// PolicyList retrieves a listing of all policies. The listing does not include the +// rules for any policy as those should be retrieved by subsequent calls to PolicyRead. +func (a *ACL) PolicyList(q *QueryOptions) ([]*ACLPolicyListEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/policies") + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLPolicyListEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// RulesTranslate translates the legacy rule syntax into the current syntax. +// +// Deprecated: Support for the legacy syntax translation has been removed. +// This function always returns an error. +func (a *ACL) RulesTranslate(rules io.Reader) (string, error) { + return "", fmt.Errorf("Legacy ACL rules were deprecated in Consul 1.4") +} + +// RulesTranslateToken translates the rules associated with the legacy syntax +// into the current syntax and returns the results. +// +// Deprecated: Support for the legacy syntax translation has been removed. +// This function always returns an error. +func (a *ACL) RulesTranslateToken(tokenID string) (string, error) { + return "", fmt.Errorf("Legacy ACL tokens and rules were deprecated in Consul 1.4") +} + +// RoleCreate will create a new role. It is not allowed for the role parameters +// ID field to be set as this will be generated by Consul while processing the request. 
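+//
+// A minimal sketch (assumes the linked policy already exists; the names are
+// placeholders):
+//
+//	role, _, err := a.RoleCreate(&ACLRole{
+//		Name:     "example-role",
+//		Policies: []*ACLRolePolicyLink{{Name: "example-policy"}},
+//	}, nil)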
+func (a *ACL) RoleCreate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) { + if role.ID != "" { + return nil, nil, fmt.Errorf("Cannot specify an ID in Role Creation") + } + + r := a.c.newRequest("PUT", "/v1/acl/role") + r.setWriteOptions(q) + r.obj = role + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// RoleUpdate updates a role. The ID field of the role parameter must be set to an +// existing role ID +func (a *ACL) RoleUpdate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) { + if role.ID == "" { + return nil, nil, fmt.Errorf("Must specify an ID in Role Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/role/"+role.ID) + r.setWriteOptions(q) + r.obj = role + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// RoleDelete deletes a role given its ID. +func (a *ACL) RoleDelete(roleID string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("DELETE", "/v1/acl/role/"+roleID) + r.setWriteOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + if err := requireOK(resp); err != nil { + return nil, err + } + closeResponseBody(resp) + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// RoleRead retrieves the role details (by ID). Returns nil if not found. +func (a *ACL) RoleRead(roleID string, q *QueryOptions) (*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/role/"+roleID) + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + found, resp, err := requireNotFoundOrOK(resp) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// RoleReadByName retrieves the role details (by name). Returns nil if not found. +func (a *ACL) RoleReadByName(roleName string, q *QueryOptions) (*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/role/name/"+url.QueryEscape(roleName)) + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + found, resp, err := requireNotFoundOrOK(resp) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// RoleList retrieves a listing of all roles. The listing does not include some +// metadata for the role as those should be retrieved by subsequent calls to +// RoleRead. 
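+//
+// A sketch of listing roles and then fetching full details for each (error
+// handling abbreviated; RoleRead may return nil if a role was deleted in
+// between):
+//
+//	roles, _, err := a.RoleList(nil)
+//	if err != nil {
+//		return err
+//	}
+//	for _, r := range roles {
+//		full, _, _ := a.RoleRead(r.ID, nil)
+//		_ = full
+//	}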
+func (a *ACL) RoleList(q *QueryOptions) ([]*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/roles") + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLRole + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// AuthMethodCreate will create a new auth method. +func (a *ACL) AuthMethodCreate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) { + if method.Name == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Creation") + } + + r := a.c.newRequest("PUT", "/v1/acl/auth-method") + r.setWriteOptions(q) + r.obj = method + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// AuthMethodUpdate updates an auth method. +func (a *ACL) AuthMethodUpdate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) { + if method.Name == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/auth-method/"+url.QueryEscape(method.Name)) + r.setWriteOptions(q) + r.obj = method + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// AuthMethodDelete deletes an auth method given its Name. +func (a *ACL) AuthMethodDelete(methodName string, q *WriteOptions) (*WriteMeta, error) { + if methodName == "" { + return nil, fmt.Errorf("Must specify a Name in Auth Method Delete") + } + + r := a.c.newRequest("DELETE", "/v1/acl/auth-method/"+url.QueryEscape(methodName)) + r.setWriteOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + if err := requireOK(resp); err != nil { + return nil, err + } + closeResponseBody(resp) + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// AuthMethodRead retrieves the auth method. Returns nil if not found. +func (a *ACL) AuthMethodRead(methodName string, q *QueryOptions) (*ACLAuthMethod, *QueryMeta, error) { + if methodName == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Read") + } + + r := a.c.newRequest("GET", "/v1/acl/auth-method/"+url.QueryEscape(methodName)) + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + found, resp, err := requireNotFoundOrOK(resp) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// AuthMethodList retrieves a listing of all auth methods. 
The listing does not +// include some metadata for the auth method as those should be retrieved by +// subsequent calls to AuthMethodRead. +func (a *ACL) AuthMethodList(q *QueryOptions) ([]*ACLAuthMethodListEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/auth-methods") + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLAuthMethodListEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// BindingRuleCreate will create a new binding rule. It is not allowed for the +// binding rule parameter's ID field to be set as this will be generated by +// Consul while processing the request. +func (a *ACL) BindingRuleCreate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) { + if rule.ID != "" { + return nil, nil, fmt.Errorf("Cannot specify an ID in Binding Rule Creation") + } + + r := a.c.newRequest("PUT", "/v1/acl/binding-rule") + r.setWriteOptions(q) + r.obj = rule + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLBindingRule + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// BindingRuleUpdate updates a binding rule. The ID field of the role binding +// rule parameter must be set to an existing binding rule ID. +func (a *ACL) BindingRuleUpdate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) { + if rule.ID == "" { + return nil, nil, fmt.Errorf("Must specify an ID in Binding Rule Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/binding-rule/"+rule.ID) + r.setWriteOptions(q) + r.obj = rule + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLBindingRule + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// BindingRuleDelete deletes a binding rule given its ID. +func (a *ACL) BindingRuleDelete(bindingRuleID string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("DELETE", "/v1/acl/binding-rule/"+bindingRuleID) + r.setWriteOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// BindingRuleRead retrieves the binding rule details. Returns nil if not found. 
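+//
+// Because a missing rule is reported as a nil result rather than an error,
+// callers should check both (a sketch):
+//
+//	rule, _, err := a.BindingRuleRead(bindingRuleID, nil)
+//	if err != nil {
+//		return err // transport or server error
+//	}
+//	if rule == nil {
+//		// no binding rule with that ID exists
+//	}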
+func (a *ACL) BindingRuleRead(bindingRuleID string, q *QueryOptions) (*ACLBindingRule, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/binding-rule/"+bindingRuleID) + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + found, resp, err := requireNotFoundOrOK(resp) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLBindingRule + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// BindingRuleList retrieves a listing of all binding rules. +func (a *ACL) BindingRuleList(methodName string, q *QueryOptions) ([]*ACLBindingRule, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/binding-rules") + if methodName != "" { + r.params.Set("authmethod", methodName) + } + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLBindingRule + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Login is used to exchange auth method credentials for a newly-minted Consul Token. +func (a *ACL) Login(auth *ACLLoginParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + r := a.c.newRequest("POST", "/v1/acl/login") + r.setWriteOptions(q) + r.obj = auth + + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, wm, nil +} + +// Logout is used to destroy a Consul Token created via Login(). +func (a *ACL) Logout(q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("POST", "/v1/acl/logout") + r.setWriteOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + if err := requireOK(resp); err != nil { + return nil, err + } + closeResponseBody(resp) + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// OIDCAuthURL requests an authorization URL to start an OIDC login flow. +func (a *ACL) OIDCAuthURL(auth *ACLOIDCAuthURLParams, q *WriteOptions) (string, *WriteMeta, error) { + if auth.AuthMethod == "" { + return "", nil, fmt.Errorf("Must specify an auth method name") + } + + r := a.c.newRequest("POST", "/v1/acl/oidc/auth-url") + r.setWriteOptions(q) + r.obj = auth + + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return "", nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return "", nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + var out aclOIDCAuthURLResponse + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.AuthURL, wm, nil +} + +type aclOIDCAuthURLResponse struct { + AuthURL string +} + +type ACLOIDCCallbackParams struct { + AuthMethod string + State string + Code string + ClientNonce string +} + +// OIDCCallback is the callback endpoint to complete an OIDC login. 
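+//
+// A sketch of the two-step flow (the auth method name and redirect URI are
+// placeholders; nonce, state, and code come from the application's own
+// redirect handling):
+//
+//	authURL, _, err := a.OIDCAuthURL(&ACLOIDCAuthURLParams{
+//		AuthMethod:  "example-oidc",
+//		RedirectURI: "http://localhost:8550/callback",
+//		ClientNonce: nonce,
+//	}, nil)
+//	// Send the user to authURL, then receive state and code on the
+//	// redirect URI.
+//	token, _, err := a.OIDCCallback(&ACLOIDCCallbackParams{
+//		AuthMethod:  "example-oidc",
+//		State:       state,
+//		Code:        code,
+//		ClientNonce: nonce,
+//	}, nil)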
+func (a *ACL) OIDCCallback(auth *ACLOIDCCallbackParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + if auth.AuthMethod == "" { + return nil, nil, fmt.Errorf("Must specify an auth method name") + } + + r := a.c.newRequest("POST", "/v1/acl/oidc/callback") + r.setWriteOptions(q) + r.obj = auth + + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, wm, nil +} + +// TemplatedPolicyReadByName retrieves the templated policy details (by name). Returns nil if not found. +func (a *ACL) TemplatedPolicyReadByName(templateName string, q *QueryOptions) (*ACLTemplatedPolicyResponse, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/templated-policy/name/"+templateName) + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + found, resp, err := requireNotFoundOrOK(resp) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLTemplatedPolicyResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// TemplatedPolicyList retrieves a listing of all templated policies. +func (a *ACL) TemplatedPolicyList(q *QueryOptions) (map[string]ACLTemplatedPolicyResponse, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/templated-policies") + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries map[string]ACLTemplatedPolicyResponse + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// TemplatedPolicyPreview is used to preview the policy rendered by the templated policy. +func (a *ACL) TemplatedPolicyPreview(tp *ACLTemplatedPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) { + r := a.c.newRequest("POST", "/v1/acl/templated-policy/preview/"+tp.TemplateName) + r.setWriteOptions(q) + r.obj = tp.TemplateVariables + + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLPolicy + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go new file mode 100644 index 0000000000..24e2c50d64 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -0,0 +1,1446 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "net/http" +) + +// ServiceKind is the kind of service being registered. +type ServiceKind string + +const ( + // ServiceKindTypical is a typical, classic Consul service. This is + // represented by the absence of a value. 
This was chosen for ease of + // backwards compatibility: existing services in the catalog would + // default to the typical service. + ServiceKindTypical ServiceKind = "" + + // ServiceKindConnectProxy is a proxy for the Connect feature. This + // service proxies another service within Consul and speaks the connect + // protocol. + ServiceKindConnectProxy ServiceKind = "connect-proxy" + + // ServiceKindMeshGateway is a Mesh Gateway for the Connect feature. This + // service will proxy connections based off the SNI header set by other + // connect proxies + ServiceKindMeshGateway ServiceKind = "mesh-gateway" + + // ServiceKindTerminatingGateway is a Terminating Gateway for the Connect + // feature. This service will proxy connections to services outside the mesh. + ServiceKindTerminatingGateway ServiceKind = "terminating-gateway" + + // ServiceKindIngressGateway is an Ingress Gateway for the Connect feature. + // This service will ingress connections based of configuration defined in + // the ingress-gateway config entry. + ServiceKindIngressGateway ServiceKind = "ingress-gateway" + + // ServiceKindAPIGateway is an API Gateway for the Connect feature. + // This service will ingress connections based of configuration defined in + // the api-gateway config entry. + ServiceKindAPIGateway ServiceKind = "api-gateway" +) + +// UpstreamDestType is the type of upstream discovery mechanism. +type UpstreamDestType string + +const ( + // UpstreamDestTypeService discovers instances via healthy service lookup. + UpstreamDestTypeService UpstreamDestType = "service" + + // UpstreamDestTypePreparedQuery discovers instances via prepared query + // execution. + UpstreamDestTypePreparedQuery UpstreamDestType = "prepared_query" +) + +// AgentCheck represents a check known to the agent +type AgentCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string + Type string + ExposedPort int + Definition HealthCheckDefinition + Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` +} + +// AgentWeights represent optional weights for a service +type AgentWeights struct { + Passing int + Warning int +} + +// AgentService represents a service known to the agent +type AgentService struct { + Kind ServiceKind `json:",omitempty"` + ID string + Service string + Tags []string + Meta map[string]string + Port int + Address string + SocketPath string `json:",omitempty"` + TaggedAddresses map[string]ServiceAddress `json:",omitempty"` + Weights AgentWeights + EnableTagOverride bool + CreateIndex uint64 `json:",omitempty" bexpr:"-"` + ModifyIndex uint64 `json:",omitempty" bexpr:"-"` + ContentHash string `json:",omitempty" bexpr:"-"` + Proxy *AgentServiceConnectProxyConfig `json:",omitempty"` + Connect *AgentServiceConnect `json:",omitempty"` + PeerName string `json:",omitempty"` + // NOTE: If we ever set the ContentHash outside of singular service lookup then we may need + // to include the Namespace in the hash. When we do, then we are in for lots of fun with tests. + // For now though, ignoring it works well enough. + Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"` + Partition string `json:",omitempty" bexpr:"-" hash:"ignore"` + // Datacenter is only ever returned and is ignored if presented. 
+	Datacenter string `json:",omitempty" bexpr:"-" hash:"ignore"`
+	Locality *Locality `json:",omitempty" bexpr:"-" hash:"ignore"`
+}
+
+// AgentServiceChecksInfo returns information about a Service and its checks
+type AgentServiceChecksInfo struct {
+	AggregatedStatus string
+	Service *AgentService
+	Checks HealthChecks
+}
+
+// AgentServiceConnect represents the Connect configuration of a service.
+type AgentServiceConnect struct {
+	Native bool `json:",omitempty"`
+	SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"`
+}
+
+// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
+// ServiceDefinition or response.
+type AgentServiceConnectProxyConfig struct {
+	EnvoyExtensions []EnvoyExtension `json:",omitempty"`
+	DestinationServiceName string `json:",omitempty"`
+	DestinationServiceID string `json:",omitempty"`
+	LocalServiceAddress string `json:",omitempty"`
+	LocalServicePort int `json:",omitempty"`
+	LocalServiceSocketPath string `json:",omitempty"`
+	Mode ProxyMode `json:",omitempty"`
+	TransparentProxy *TransparentProxyConfig `json:",omitempty"`
+	Config map[string]interface{} `json:",omitempty" bexpr:"-"`
+	Upstreams []Upstream `json:",omitempty"`
+	MeshGateway MeshGatewayConfig `json:",omitempty"`
+	Expose ExposeConfig `json:",omitempty"`
+	AccessLogs *AccessLogsConfig `json:",omitempty"`
+}
+
+const (
+	// MemberTagKeyACLMode is the key used to indicate what ACL mode the agent is
+	// operating in. The values of this key will be one of the MemberACLMode constants
+	// with the key not being present indicating ACLModeUnknown.
+	MemberTagKeyACLMode = "acls"
+
+	// MemberTagKeyRole is the key used to indicate whether the member is a server or not.
+	MemberTagKeyRole = "role"
+
+	// MemberTagValueRoleServer is the value of the MemberTagKeyRole used to indicate
+	// that the member represents a Consul server.
+	MemberTagValueRoleServer = "consul"
+
+	// MemberTagValueRoleClient is the value of the MemberTagKeyRole used to indicate
+	// that the member represents a Consul client.
+	MemberTagValueRoleClient = "node"
+
+	// MemberTagKeyDatacenter is the key used to indicate which datacenter this member is in.
+	MemberTagKeyDatacenter = "dc"
+
+	// MemberTagKeySegment is the key name of the tag used to indicate which network
+	// segment this member is in.
+	// Network Segments are a Consul Enterprise feature.
+	MemberTagKeySegment = "segment"
+
+	// MemberTagKeyPartition is the key name of the tag used to indicate which partition
+	// this member is in.
+	// Partitions are a Consul Enterprise feature.
+	MemberTagKeyPartition = "ap"
+
+	// MemberTagKeyBootstrap is the key name of the tag used to indicate whether this
+	// agent was started with the "bootstrap" configuration enabled.
+	MemberTagKeyBootstrap = "bootstrap"
+	// MemberTagValueBootstrap is the value of the MemberTagKeyBootstrap key when the
+	// agent was started with the "bootstrap" configuration enabled.
+	MemberTagValueBootstrap = "1"
+
+	// MemberTagKeyBootstrapExpect is the key name of the tag used to indicate whether
+	// this agent was started with the "bootstrap_expect" configuration set to a non-zero
+	// value. The value of this key will be the string form of that configuration value.
+	MemberTagKeyBootstrapExpect = "expect"
+
+	// MemberTagKeyUseTLS is the key name of the tag used to indicate whether this agent
+	// was configured to use TLS.
+ MemberTagKeyUseTLS = "use_tls"
+ // MemberTagValueUseTLS is the value of the MemberTagKeyUseTLS when the agent was
+ // configured to use TLS. Any other value indicates that it was not set up in
+ // that manner.
+ MemberTagValueUseTLS = "1"
+
+ // MemberTagKeyReadReplica is the key used to indicate that the member is a read
+ // replica server (will remain a Raft non-voter).
+ // Read Replicas are a Consul Enterprise feature.
+ MemberTagKeyReadReplica = "read_replica"
+ // MemberTagValueReadReplica is the value of the MemberTagKeyReadReplica key when
+ // the member is in fact a read-replica. Any other value indicates that it is not.
+ // Read Replicas are a Consul Enterprise feature.
+ MemberTagValueReadReplica = "1"
+)
+
+type MemberACLMode string
+
+const (
+ // ACLModeDisabled indicates that ACLs are disabled for this agent
+ ACLModeDisabled MemberACLMode = "0"
+ // ACLModeEnabled indicates that ACLs are enabled and operating in new ACL
+ // mode (v1.4.0+ ACLs)
+ ACLModeEnabled MemberACLMode = "1"
+ // ACLModeLegacy has been deprecated, and will be treated as ACLModeUnknown.
+ ACLModeLegacy MemberACLMode = "2" // DEPRECATED
+ // ACLModeUnknown is used to indicate that the AgentMember.Tags didn't advertise
+ // an ACL mode at all. This is the case for Consul versions before v1.4.0 and
+ // should be treated the same as ACLModeLegacy.
+ ACLModeUnknown MemberACLMode = "3"
+)
+
+// AgentMember represents a cluster member known to the agent
+type AgentMember struct {
+ Name string
+ Addr string
+ Port uint16
+ Tags map[string]string
+ // Status of the Member which corresponds to github.com/hashicorp/serf/serf.MemberStatus
+ // Value is one of:
+ //
+ // AgentMemberNone = 0
+ // AgentMemberAlive = 1
+ // AgentMemberLeaving = 2
+ // AgentMemberLeft = 3
+ // AgentMemberFailed = 4
+ Status int
+ ProtocolMin uint8
+ ProtocolMax uint8
+ ProtocolCur uint8
+ DelegateMin uint8
+ DelegateMax uint8
+ DelegateCur uint8
+}
+
+// ACLMode returns the ACL mode this agent is operating in.
+func (m *AgentMember) ACLMode() MemberACLMode {
+ mode := m.Tags[MemberTagKeyACLMode]
+
+ // The key may not exist; in that case mode is the empty string and is
+ // handled by the default case of the switch below.
+ switch MemberACLMode(mode) {
+ case ACLModeDisabled:
+ return ACLModeDisabled
+ case ACLModeEnabled:
+ return ACLModeEnabled
+ default:
+ return ACLModeUnknown
+ }
+}
+
+// IsConsulServer returns true when this member is a Consul server.
+func (m *AgentMember) IsConsulServer() bool {
+ return m.Tags[MemberTagKeyRole] == MemberTagValueRoleServer
+}
+
+// AllSegments is used to select for all segments in MembersOpts.
+const AllSegments = "_all"
+
+// MembersOpts is used for querying member information.
+type MembersOpts struct {
+ // WAN is whether to show members from the WAN.
+ WAN bool
+
+ // Segment is the LAN segment to show members for. Setting this to the
+ // AllSegments value above will show members in all segments.
+ Segment string
+
+ Filter string
+}
+
+// AgentServiceRegistration is used to register a new service
+type AgentServiceRegistration struct {
+ Kind ServiceKind `json:",omitempty"`
+ ID string `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Tags []string `json:",omitempty"`
+ Port int `json:",omitempty"`
+ Address string `json:",omitempty"`
+ SocketPath string `json:",omitempty"`
+ TaggedAddresses map[string]ServiceAddress `json:",omitempty"`
+ EnableTagOverride bool `json:",omitempty"`
+ Meta map[string]string `json:",omitempty"`
+ Weights *AgentWeights `json:",omitempty"`
+ Check *AgentServiceCheck
+ Checks AgentServiceChecks
+ Proxy *AgentServiceConnectProxyConfig `json:",omitempty"`
+ Connect *AgentServiceConnect `json:",omitempty"`
+ Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"`
+ Partition string `json:",omitempty" bexpr:"-" hash:"ignore"`
+ Locality *Locality `json:",omitempty" bexpr:"-" hash:"ignore"`
+}
+
+// ServiceRegisterOpts is used to pass extra options to the service register.
+type ServiceRegisterOpts struct {
+ // ReplaceExistingChecks controls whether health checks that are missing
+ // from this registration are deleted from the agent. Setting it allows a
+ // service and its checks to be registered idempotently, without having to
+ // manually deregister stale checks.
+ ReplaceExistingChecks bool
+
+ // Token is used to provide a per-request ACL token
+ // which overrides the agent's default token.
+ Token string
+
+ // ctx is an optional context pass through to the underlying HTTP
+ // request layer. Use WithContext() to set the context.
+ ctx context.Context
+}
+
+// WithContext sets the context to be used for the request on a new ServiceRegisterOpts,
+// and returns the opts.
+func (o ServiceRegisterOpts) WithContext(ctx context.Context) ServiceRegisterOpts {
+ o.ctx = ctx
+ return o
+}
+
+// AgentCheckRegistration is used to register a new check
+type AgentCheckRegistration struct {
+ ID string `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Notes string `json:",omitempty"`
+ ServiceID string `json:",omitempty"`
+ AgentServiceCheck
+ Namespace string `json:",omitempty"`
+ Partition string `json:",omitempty"`
+}
+
+// AgentServiceCheck is used to define a node or service level check
+type AgentServiceCheck struct {
+ CheckID string `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Args []string `json:"ScriptArgs,omitempty"`
+ DockerContainerID string `json:",omitempty"`
+ Shell string `json:",omitempty"` // Only supported for Docker.
+ Interval string `json:",omitempty"` + Timeout string `json:",omitempty"` + TTL string `json:",omitempty"` + HTTP string `json:",omitempty"` + Header map[string][]string `json:",omitempty"` + Method string `json:",omitempty"` + Body string `json:",omitempty"` + TCP string `json:",omitempty"` + TCPUseTLS bool `json:",omitempty"` + UDP string `json:",omitempty"` + Status string `json:",omitempty"` + Notes string `json:",omitempty"` + TLSServerName string `json:",omitempty"` + TLSSkipVerify bool `json:",omitempty"` + GRPC string `json:",omitempty"` + GRPCUseTLS bool `json:",omitempty"` + H2PING string `json:",omitempty"` + H2PingUseTLS bool `json:",omitempty"` + AliasNode string `json:",omitempty"` + AliasService string `json:",omitempty"` + SuccessBeforePassing int `json:",omitempty"` + FailuresBeforeWarning int `json:",omitempty"` + FailuresBeforeCritical int `json:",omitempty"` + + // In Consul 0.7 and later, checks that are associated with a service + // may also contain this optional DeregisterCriticalServiceAfter field, + // which is a timeout in the same Go time format as Interval and TTL. If + // a check is in the critical state for more than this configured value, + // then its associated service (and all of its associated checks) will + // automatically be deregistered. + DeregisterCriticalServiceAfter string `json:",omitempty"` +} +type AgentServiceChecks []*AgentServiceCheck + +// AgentToken is used when updating ACL tokens for an agent. +type AgentToken struct { + Token string +} + +// Metrics info is used to store different types of metric values from the agent. +type MetricsInfo struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +// GaugeValue stores one value that is updated as time goes on, such as +// the amount of memory allocated. +type GaugeValue struct { + Name string + Value float32 + Labels map[string]string +} + +// PointValue holds a series of points for a metric. +type PointValue struct { + Name string + Points []float32 +} + +// SampledValue stores info about a metric that is incremented over time, +// such as the number of requests to an HTTP endpoint. +type SampledValue struct { + Name string + Count int + Sum float64 + Min float64 + Max float64 + Mean float64 + Stddev float64 + Labels map[string]string +} + +// AgentAuthorizeParams are the request parameters for authorizing a request. +type AgentAuthorizeParams struct { + Target string + ClientCertURI string + ClientCertSerial string +} + +// AgentAuthorize is the response structure for Connect authorization. +type AgentAuthorize struct { + Authorized bool + Reason string +} + +// ConnectProxyConfig is the response structure for agent-local proxy +// configuration. +type ConnectProxyConfig struct { + ProxyServiceID string + TargetServiceID string + TargetServiceName string + ContentHash string + Config map[string]interface{} `bexpr:"-"` + Upstreams []Upstream +} + +// Upstream is the response structure for a proxy upstream configuration. 
+type Upstream struct { + DestinationType UpstreamDestType `json:",omitempty"` + DestinationPartition string `json:",omitempty"` + DestinationNamespace string `json:",omitempty"` + DestinationPeer string `json:",omitempty"` + DestinationName string + Datacenter string `json:",omitempty"` + LocalBindAddress string `json:",omitempty"` + LocalBindPort int `json:",omitempty"` + LocalBindSocketPath string `json:",omitempty"` + LocalBindSocketMode string `json:",omitempty"` + Config map[string]interface{} `json:",omitempty" bexpr:"-"` + MeshGateway MeshGatewayConfig `json:",omitempty"` + CentrallyConfigured bool `json:",omitempty" bexpr:"-"` +} + +// Agent can be used to query the Agent endpoints +type Agent struct { + c *Client + + // cache the node name + nodeName string +} + +// Agent returns a handle to the agent endpoints +func (c *Client) Agent() *Agent { + return &Agent{c: c} +} + +// Self is used to query the agent we are speaking to for +// information about itself +func (a *Agent) Self() (map[string]map[string]interface{}, error) { + r := a.c.newRequest("GET", "/v1/agent/self") + _, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + var out map[string]map[string]interface{} + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Host is used to retrieve information about the host the +// agent is running on such as CPU, memory, and disk. Requires +// a operator:read ACL token. +func (a *Agent) Host() (map[string]interface{}, error) { + r := a.c.newRequest("GET", "/v1/agent/host") + _, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + var out map[string]interface{} + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Version is used to retrieve information about the running Consul version and build. +func (a *Agent) Version() (map[string]interface{}, error) { + r := a.c.newRequest("GET", "/v1/agent/version") + _, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + var out map[string]interface{} + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Metrics is used to query the agent we are speaking to for +// its current internal metric data +func (a *Agent) Metrics() (*MetricsInfo, error) { + r := a.c.newRequest("GET", "/v1/agent/metrics") + _, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + var out *MetricsInfo + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// MetricsStream returns an io.ReadCloser which will emit a stream of metrics +// until the context is cancelled. The metrics are json encoded. +// The caller is responsible for closing the returned io.ReadCloser. 
+func (a *Agent) MetricsStream(ctx context.Context) (io.ReadCloser, error) { + r := a.c.newRequest("GET", "/v1/agent/metrics/stream") + r.ctx = ctx + _, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + if err := requireOK(resp); err != nil { + return nil, err + } + return resp.Body, nil +} + +// Reload triggers a configuration reload for the agent we are connected to. +func (a *Agent) Reload() error { + r := a.c.newRequest("PUT", "/v1/agent/reload") + _, resp, err := a.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} + +// NodeName is used to get the node name of the agent +func (a *Agent) NodeName() (string, error) { + if a.nodeName != "" { + return a.nodeName, nil + } + info, err := a.Self() + if err != nil { + return "", err + } + name := info["Config"]["NodeName"].(string) + a.nodeName = name + return name, nil +} + +// Checks returns the locally registered checks +func (a *Agent) Checks() (map[string]*AgentCheck, error) { + return a.ChecksWithFilter("") +} + +// ChecksWithFilter returns a subset of the locally registered checks that match +// the given filter expression +func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) { + return a.ChecksWithFilterOpts(filter, nil) +} + +// ChecksWithFilterOpts returns a subset of the locally registered checks that match +// the given filter expression and QueryOptions. +func (a *Agent) ChecksWithFilterOpts(filter string, q *QueryOptions) (map[string]*AgentCheck, error) { + r := a.c.newRequest("GET", "/v1/agent/checks") + r.setQueryOptions(q) + r.filterQuery(filter) + _, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + var out map[string]*AgentCheck + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Services returns the locally registered services +func (a *Agent) Services() (map[string]*AgentService, error) { + return a.ServicesWithFilter("") +} + +// ServicesWithFilter returns a subset of the locally registered services that match +// the given filter expression +func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) { + return a.ServicesWithFilterOpts(filter, nil) +} + +// ServicesWithFilterOpts returns a subset of the locally registered services that match +// the given filter expression and QueryOptions. 
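The filter strings accepted by these listing helpers use Consul's go-bexpr expression syntax. A minimal sketch, assuming the canonical import path github.com/hashicorp/consul/api, a reachable local agent, and a hypothetical service named "web":

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// The filter selector "Service" and the value "web" are hypothetical;
	// any go-bexpr expression over the AgentService fields should work.
	services, err := client.Agent().ServicesWithFilter(`Service == "web"`)
	if err != nil {
		log.Fatal(err)
	}
	for id, svc := range services {
		fmt.Println(id, svc.Service, svc.Port)
	}
}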
+func (a *Agent) ServicesWithFilterOpts(filter string, q *QueryOptions) (map[string]*AgentService, error) {
+ r := a.c.newRequest("GET", "/v1/agent/services")
+ r.setQueryOptions(q)
+ r.filterQuery(filter)
+ _, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer closeResponseBody(resp)
+ if err := requireOK(resp); err != nil {
+ return nil, err
+ }
+ var out map[string]*AgentService
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+
+ return out, nil
+}
+
+// AgentHealthServiceByID returns, for a given serviceID, the aggregated health status and the
+// service definition, or an error if any:
+// - If the service is not found, will return (critical, nil, nil)
+// - If the service is found, will return (critical|passing|warning, AgentServiceChecksInfo, nil)
+// - In all other cases, will return an error
+func (a *Agent) AgentHealthServiceByID(serviceID string) (string, *AgentServiceChecksInfo, error) {
+ return a.AgentHealthServiceByIDOpts(serviceID, nil)
+}
+
+func (a *Agent) AgentHealthServiceByIDOpts(serviceID string, q *QueryOptions) (string, *AgentServiceChecksInfo, error) {
+ path := fmt.Sprintf("/v1/agent/health/service/id/%v", serviceID)
+ r := a.c.newRequest("GET", path)
+ r.setQueryOptions(q)
+ r.params.Add("format", "json")
+ r.header.Set("Accept", "application/json")
+ // not a lot of value in wrapping the doRequest call in a requireHttpCodes call
+ // we manipulate the resp body and the require calls "swallow" the content on err
+ _, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return "", nil, err
+ }
+ defer closeResponseBody(resp)
+ // Service not Found
+ if resp.StatusCode == http.StatusNotFound {
+ return HealthCritical, nil, nil
+ }
+ var out *AgentServiceChecksInfo
+ if err := decodeBody(resp, &out); err != nil {
+ return HealthCritical, out, err
+ }
+ switch resp.StatusCode {
+ case http.StatusOK:
+ return HealthPassing, out, nil
+ case http.StatusTooManyRequests:
+ return HealthWarning, out, nil
+ case http.StatusServiceUnavailable:
+ return HealthCritical, out, nil
+ }
+ return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path)
+}
+
+// AgentHealthServiceByName returns, for a given service name, the aggregated health status for all services
+// having the specified name:
+// - If no service is found, will return (critical, [], nil)
+// - If the service is found, will return (critical|passing|warning, []api.AgentServiceChecksInfo, nil)
+// - In all other cases, will return an error
+func (a *Agent) AgentHealthServiceByName(service string) (string, []AgentServiceChecksInfo, error) {
+ return a.AgentHealthServiceByNameOpts(service, nil)
+}
+
+func (a *Agent) AgentHealthServiceByNameOpts(service string, q *QueryOptions) (string, []AgentServiceChecksInfo, error) {
+ path := fmt.Sprintf("/v1/agent/health/service/name/%v", service)
+ r := a.c.newRequest("GET", path)
+ r.setQueryOptions(q)
+ r.params.Add("format", "json")
+ r.header.Set("Accept", "application/json")
+ // not a lot of value in wrapping the doRequest call in a requireHttpCodes call
+ // we manipulate the resp body and the require calls "swallow" the content on err
+ _, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return "", nil, err
+ }
+ defer closeResponseBody(resp)
+ // Service not Found
+ if resp.StatusCode == http.StatusNotFound {
+ return HealthCritical, nil, nil
+ }
+ var out []AgentServiceChecksInfo
+ if err := decodeBody(resp, &out); err != nil {
+ return HealthCritical, out, err
+ }
+ switch resp.StatusCode {
+ case http.StatusOK:
+ return HealthPassing, out, nil
+ case http.StatusTooManyRequests:
+ return HealthWarning, out, nil
+ case http.StatusServiceUnavailable:
+ return HealthCritical, out, nil
+ }
+ return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path)
+}
+
+// Service returns a locally registered service instance and allows for
+// hash-based blocking.
+//
+// Note that this uses an unconventional blocking mechanism since it's
+// agent-local state. That means there is no persistent raft index so we block
+// based on object hash instead.
+func (a *Agent) Service(serviceID string, q *QueryOptions) (*AgentService, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/agent/service/"+serviceID)
+ r.setQueryOptions(q)
+ rtt, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer closeResponseBody(resp)
+ if err := requireOK(resp); err != nil {
+ return nil, nil, err
+ }
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out *AgentService
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return out, qm, nil
+}
+
+// Members returns the known gossip members. The WAN
+// flag can be used to query a server for WAN members.
+func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
+ r := a.c.newRequest("GET", "/v1/agent/members")
+ if wan {
+ r.params.Set("wan", "1")
+ }
+ _, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer closeResponseBody(resp)
+ if err := requireOK(resp); err != nil {
+ return nil, err
+ }
+ var out []*AgentMember
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
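The Service endpoint above blocks on a content hash rather than a Raft index. A minimal polling sketch, assuming the canonical import path github.com/hashicorp/consul/api, a reachable local agent, and a hypothetical service ID "web-1":

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	agent := client.Agent()
	var hash string
	for {
		// Block until the local definition of "web-1" (hypothetical ID)
		// changes, using WaitHash instead of a Raft index. The first pass
		// has an empty hash and returns immediately.
		svc, meta, err := agent.Service("web-1", &api.QueryOptions{
			WaitHash: hash,
			WaitTime: 5 * time.Minute,
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("service %s now on port %d\n", svc.Service, svc.Port)
		hash = meta.LastContentHash
	}
}

+// MembersOpts returns the known gossip members and can be passed
+// additional options for WAN/segment filtering.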
+func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) {
+ r := a.c.newRequest("GET", "/v1/agent/members")
+ r.params.Set("segment", opts.Segment)
+ if opts.WAN {
+ r.params.Set("wan", "1")
+ }
+
+ if opts.Filter != "" {
+ r.params.Set("filter", opts.Filter)
+ }
+
+ _, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer closeResponseBody(resp)
+ if err := requireOK(resp); err != nil {
+ return nil, err
+ }
+ var out []*AgentMember
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ServiceRegister is used to register a new service with
+// the local agent
+func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
+ opts := ServiceRegisterOpts{
+ ReplaceExistingChecks: false,
+ }
+
+ return a.serviceRegister(service, opts)
+}
+
+// ServiceRegisterOpts is used to register a new service with
+// the local agent and can be passed additional options.
+func (a *Agent) ServiceRegisterOpts(service *AgentServiceRegistration, opts ServiceRegisterOpts) error {
+ return a.serviceRegister(service, opts)
+}
+
+func (a *Agent) serviceRegister(service *AgentServiceRegistration, opts ServiceRegisterOpts) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/register")
+ r.obj = service
+ r.ctx = opts.ctx
+ if opts.ReplaceExistingChecks {
+ r.params.Set("replace-existing-checks", "true")
+ }
+ if opts.Token != "" {
+ r.header.Set("X-Consul-Token", opts.Token)
+ }
+ _, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return err
+ }
+ defer closeResponseBody(resp)
+ if err := requireOK(resp); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ServiceDeregister is used to deregister a service with
+// the local agent
+func (a *Agent) ServiceDeregister(serviceID string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
+ _, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return err
+ }
+ defer closeResponseBody(resp)
+ if err := requireOK(resp); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ServiceDeregisterOpts is used to deregister a service with
+// the local agent with QueryOptions.
+func (a *Agent) ServiceDeregisterOpts(serviceID string, q *QueryOptions) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
+ r.setQueryOptions(q)
+ _, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return err
+ }
+ defer closeResponseBody(resp)
+ if err := requireOK(resp); err != nil {
+ return err
+ }
+ return nil
+}
+
+// PassTTL is used to set a TTL check to the passing state.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 or changed to use
+// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
+func (a *Agent) PassTTL(checkID, note string) error {
+ return a.updateTTL(checkID, note, "pass")
+}
+
+// WarnTTL is used to set a TTL check to the warning state.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 or changed to use
+// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
+func (a *Agent) WarnTTL(checkID, note string) error {
+ return a.updateTTL(checkID, note, "warn")
+}
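Returning to serviceRegister above: a registration sketch with a health check and idempotent re-registration. The service name, port, and check URL are hypothetical, and a reachable local agent is assumed:

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	reg := &api.AgentServiceRegistration{
		Name: "web", // hypothetical service name
		Port: 8080,
		Check: &api.AgentServiceCheck{
			HTTP:                           "http://127.0.0.1:8080/health",
			Interval:                       "10s",
			Timeout:                        "1s",
			DeregisterCriticalServiceAfter: "30m",
		},
	}
	// ReplaceExistingChecks makes the call idempotent: checks that are no
	// longer present in this registration are removed from the agent.
	opts := api.ServiceRegisterOpts{ReplaceExistingChecks: true}
	if err := client.Agent().ServiceRegisterOpts(reg, opts); err != nil {
		log.Fatal(err)
	}
}

+// FailTTL is used to set a TTL check to the failing state.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().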
+// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) FailTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "fail") +} + +// updateTTL is used to update the TTL of a check. This is the internal +// method that uses the old API that's present in Consul versions prior to +// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed +// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, +// but keep the old Pass/Warn/Fail methods using the old API under the hood. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 and the server endpoints will +// be removed in 0.9. +func (a *Agent) updateTTL(checkID, note, status string) error { + switch status { + case "pass": + case "warn": + case "fail": + default: + return fmt.Errorf("Invalid status: %s", status) + } + endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) + r := a.c.newRequest("PUT", endpoint) + r.params.Set("note", note) + _, resp, err := a.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} + +// checkUpdate is the payload for a PUT for a check update. +type checkUpdate struct { + // Status is one of the api.Health* states: HealthPassing + // ("passing"), HealthWarning ("warning"), or HealthCritical + // ("critical"). + Status string + + // Output is the information to post to the UI for operators as the + // output of the process that decided to hit the TTL check. This is + // different from the note field that's associated with the check + // itself. + Output string +} + +// UpdateTTL is used to update the TTL of a check. This uses the newer API +// that was introduced in Consul 0.6.4 and later. We translate the old status +// strings for compatibility (though a newer version of Consul will still be +// required to use this API). 
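A reporting loop for UpdateTTL below, assuming a hypothetical TTL check "mem-check" that was registered with TTL "15s"; the application must check in before the TTL expires or the check turns critical:

package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	agent := client.Agent()
	for {
		// "mem-check" is a hypothetical check ID; HealthPassing is one of
		// the translated status values ("passing", "warning", "critical").
		if err := agent.UpdateTTL("mem-check", "memory OK", api.HealthPassing); err != nil {
			log.Println("ttl update failed:", err)
		}
		time.Sleep(10 * time.Second) // check in well inside the 15s TTL
	}
}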
+func (a *Agent) UpdateTTL(checkID, output, status string) error { + return a.UpdateTTLOpts(checkID, output, status, nil) +} + +func (a *Agent) UpdateTTLOpts(checkID, output, status string, q *QueryOptions) error { + switch status { + case "pass", HealthPassing: + status = HealthPassing + case "warn", HealthWarning: + status = HealthWarning + case "fail", HealthCritical: + status = HealthCritical + default: + return fmt.Errorf("Invalid status: %s", status) + } + + endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) + r := a.c.newRequest("PUT", endpoint) + r.setQueryOptions(q) + r.obj = &checkUpdate{ + Status: status, + Output: output, + } + + _, resp, err := a.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} + +// CheckRegister is used to register a new check with +// the local agent +func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + return a.CheckRegisterOpts(check, nil) +} + +// CheckRegisterOpts is used to register a new check with +// the local agent using query options +func (a *Agent) CheckRegisterOpts(check *AgentCheckRegistration, q *QueryOptions) error { + r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.setQueryOptions(q) + r.obj = check + _, resp, err := a.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} + +// CheckDeregister is used to deregister a check with +// the local agent +func (a *Agent) CheckDeregister(checkID string) error { + return a.CheckDeregisterOpts(checkID, nil) +} + +// CheckDeregisterOpts is used to deregister a check with +// the local agent using query options +func (a *Agent) CheckDeregisterOpts(checkID string, q *QueryOptions) error { + r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) + r.setQueryOptions(q) + _, resp, err := a.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} + +// Join is used to instruct the agent to attempt a join to +// another cluster member +func (a *Agent) Join(addr string, wan bool) error { + r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) + if wan { + r.params.Set("wan", "1") + } + _, resp, err := a.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} + +// Leave is used to have the agent gracefully leave the cluster and shutdown +func (a *Agent) Leave() error { + r := a.c.newRequest("PUT", "/v1/agent/leave") + _, resp, err := a.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} + +type ForceLeaveOpts struct { + // Prune indicates if we should remove a failed agent from the list of + // members in addition to ejecting it. + Prune bool + + // WAN indicates that the request should exclusively target the WAN pool. 
+ WAN bool
+}
+
+// ForceLeave is used to have the agent eject a failed node
+func (a *Agent) ForceLeave(node string) error {
+ return a.ForceLeaveOpts(node, ForceLeaveOpts{})
+}
+
+// ForceLeavePrune is used to have a failed agent removed
+// from the list of members
+func (a *Agent) ForceLeavePrune(node string) error {
+ return a.ForceLeaveOpts(node, ForceLeaveOpts{Prune: true})
+}
+
+// ForceLeaveOpts is used to have the agent eject a failed node or remove it
+// completely from the list of members.
+//
+// DEPRECATED - Use ForceLeaveOptions instead.
+func (a *Agent) ForceLeaveOpts(node string, opts ForceLeaveOpts) error {
+ return a.ForceLeaveOptions(node, opts, nil)
+}
+
+// ForceLeaveOptions is used to have the agent eject a failed node or remove it
+// completely from the list of members. Allows usage of QueryOptions on top of ForceLeaveOpts.
+func (a *Agent) ForceLeaveOptions(node string, opts ForceLeaveOpts, q *QueryOptions) error {
+ r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
+ r.setQueryOptions(q)
+ if opts.Prune {
+ r.params.Set("prune", "1")
+ }
+ if opts.WAN {
+ r.params.Set("wan", "1")
+ }
+ _, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return err
+ }
+ defer closeResponseBody(resp)
+ if err := requireOK(resp); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ConnectAuthorize is used to authorize an incoming connection
+// to a natively integrated Connect service.
+func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, error) {
+ r := a.c.newRequest("POST", "/v1/agent/connect/authorize")
+ r.obj = auth
+ _, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer closeResponseBody(resp)
+ if err := requireOK(resp); err != nil {
+ return nil, err
+ }
+ var out AgentAuthorize
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+// ConnectCARoots returns the list of roots.
+func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/agent/connect/ca/roots")
+ r.setQueryOptions(q)
+ rtt, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer closeResponseBody(resp)
+ if err := requireOK(resp); err != nil {
+ return nil, nil, err
+ }
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out CARootList
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return &out, qm, nil
+}
+
+// ConnectCALeaf gets the leaf certificate for the given service ID.
+func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/agent/connect/ca/leaf/"+serviceID)
+ r.setQueryOptions(q)
+ rtt, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer closeResponseBody(resp)
+ if err := requireOK(resp); err != nil {
+ return nil, nil, err
+ }
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out LeafCert
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return &out, qm, nil
+}
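An eject-and-prune sketch for the force-leave helpers above; the node name is hypothetical, and a reachable local agent is assumed:

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Eject the failed node "node-3" (hypothetical) and prune it from the
	// member list so it no longer lingers in the "left" state.
	opts := api.ForceLeaveOpts{Prune: true}
	if err := client.Agent().ForceLeaveOptions("node-3", opts, nil); err != nil {
		log.Fatal(err)
	}
}

+// EnableServiceMaintenance toggles service maintenance mode on
+// for the given service ID.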
+func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { + return a.EnableServiceMaintenanceOpts(serviceID, reason, nil) +} + +func (a *Agent) EnableServiceMaintenanceOpts(serviceID, reason string, q *QueryOptions) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.setQueryOptions(q) + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := a.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} + +// DisableServiceMaintenance toggles service maintenance mode off +// for the given service ID. +func (a *Agent) DisableServiceMaintenance(serviceID string) error { + return a.DisableServiceMaintenanceOpts(serviceID, nil) +} + +func (a *Agent) DisableServiceMaintenanceOpts(serviceID string, q *QueryOptions) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.setQueryOptions(q) + r.params.Set("enable", "false") + _, resp, err := a.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} + +// EnableNodeMaintenance toggles node maintenance mode on for the +// agent we are connected to. +func (a *Agent) EnableNodeMaintenance(reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := a.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} + +// DisableNodeMaintenance toggles node maintenance mode off for the +// agent we are connected to. +func (a *Agent) DisableNodeMaintenance() error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "false") + _, resp, err := a.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} + +// Monitor returns a channel which will receive streaming logs from the agent +// Providing a non-nil stopCh can be used to close the connection and stop the +// log stream. An empty string will be sent down the given channel when there's +// nothing left to stream, after which the caller should close the stopCh. +func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { + return a.monitor(loglevel, false, stopCh, q) +} + +// MonitorJSON is like Monitor except it returns logs in JSON format. 
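A log-streaming sketch for Monitor above, assuming a reachable local agent: it prints log lines for ten seconds, then closes stopCh to end the stream:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	stopCh := make(chan struct{})
	logCh, err := client.Agent().Monitor("info", stopCh, nil)
	if err != nil {
		log.Fatal(err)
	}
	timeout := time.After(10 * time.Second)
	for {
		select {
		case line := <-logCh:
			// An empty string means the agent has nothing left to stream.
			if line != "" {
				fmt.Println(line)
			}
		case <-timeout:
			close(stopCh)
			return
		}
	}
}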
+func (a *Agent) MonitorJSON(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { + return a.monitor(loglevel, true, stopCh, q) +} + +func (a *Agent) monitor(loglevel string, logJSON bool, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { + r := a.c.newRequest("GET", "/v1/agent/monitor") + r.setQueryOptions(q) + if loglevel != "" { + r.params.Add("loglevel", loglevel) + } + if logJSON { + r.params.Set("logjson", "true") + } + _, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + if err := requireOK(resp); err != nil { + return nil, err + } + logCh := make(chan string, 64) + go func() { + defer closeResponseBody(resp) + scanner := bufio.NewScanner(resp.Body) + for { + select { + case <-stopCh: + close(logCh) + return + default: + } + if scanner.Scan() { + // An empty string signals to the caller that + // the scan is done, so make sure we only emit + // that when the scanner says it's done, not if + // we happen to ingest an empty line. + if text := scanner.Text(); text != "" { + logCh <- text + } else { + logCh <- " " + } + } else { + logCh <- "" + } + } + }() + return logCh, nil +} + +// UpdateACLToken updates the agent's "acl_token". See updateToken for more +// details. Deprecated in Consul 1.4. +// +// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateDefaultACLToken for v1.4.3 and above +func (a *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return nil, fmt.Errorf("Legacy ACL Tokens were deprecated in Consul 1.4") +} + +// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken +// for more details. Deprecated in Consul 1.4. +// +// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentACLToken for v1.4.3 and above +func (a *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) { + return nil, fmt.Errorf("Legacy ACL Tokens were deprecated in Consul 1.4") +} + +// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See +// updateToken for more details. Deprecated in Consul 1.4. +// +// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentMasterACLToken for v1.4.3 and above +func (a *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) { + return nil, fmt.Errorf("Legacy ACL Tokens were deprecated in Consul 1.4") +} + +// UpdateACLReplicationToken updates the agent's "acl_replication_token". See +// updateToken for more details. Deprecated in Consul 1.4. +// +// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateReplicationACLToken for v1.4.3 and above +func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) { + return nil, fmt.Errorf("Legacy ACL Tokens were deprecated in Consul 1.4") +} + +// UpdateDefaultACLToken updates the agent's "default" token. See updateToken +// for more details +func (a *Agent) UpdateDefaultACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateTokenFallback(token, q, "default", "acl_token") +} + +// UpdateAgentACLToken updates the agent's "agent" token. See updateToken +// for more details +func (a *Agent) UpdateAgentACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateTokenFallback(token, q, "agent", "acl_agent_token") +} + +// UpdateAgentRecoveryACLToken updates the agent's "agent_recovery" token. See updateToken +// for more details. 
+func (a *Agent) UpdateAgentRecoveryACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+ return a.updateTokenFallback(token, q, "agent_recovery", "agent_master", "acl_agent_master_token")
+}
+
+// UpdateAgentMasterACLToken updates the agent's "agent_master" token. See updateToken
+// for more details.
+//
+// DEPRECATED - Prefer UpdateAgentRecoveryACLToken for v1.11 and above.
+func (a *Agent) UpdateAgentMasterACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+ return a.updateTokenFallback(token, q, "agent_master", "acl_agent_master_token")
+}
+
+// UpdateReplicationACLToken updates the agent's "replication" token. See updateToken
+// for more details
+func (a *Agent) UpdateReplicationACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+ return a.updateTokenFallback(token, q, "replication", "acl_replication_token")
+}
+
+// UpdateConfigFileRegistrationToken updates the agent's "config_file_service_registration"
+// token. See updateToken for more details.
+func (a *Agent) UpdateConfigFileRegistrationToken(token string, q *WriteOptions) (*WriteMeta, error) {
+ return a.updateToken("config_file_service_registration", token, q)
+}
+
+// UpdateDNSToken updates the agent's "dns" token. See updateToken for more details.
+func (a *Agent) UpdateDNSToken(token string, q *WriteOptions) (*WriteMeta, error) {
+ return a.updateToken("dns", token, q)
+}
+
+// updateToken can be used to update one of an agent's ACL tokens after the agent has
+// started. The tokens may not be persisted, so they will need to be updated again if
+// the agent is restarted, unless the agent is configured to persist them.
+func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) {
+ meta, _, err := a.updateTokenOnce(target, token, q)
+ return meta, err
+}
+
+func (a *Agent) updateTokenFallback(token string, q *WriteOptions, targets ...string) (*WriteMeta, error) {
+ if len(targets) == 0 {
+ panic("targets must not be empty")
+ }
+
+ var (
+ meta *WriteMeta
+ err error
+ )
+ for _, target := range targets {
+ var status int
+ meta, status, err = a.updateTokenOnce(target, token, q)
+ if err == nil && status != http.StatusNotFound {
+ return meta, err
+ }
+ }
+ return meta, err
+}
+
+func (a *Agent) updateTokenOnce(target, token string, q *WriteOptions) (*WriteMeta, int, error) {
+ r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target))
+ r.setWriteOptions(q)
+ r.obj = &AgentToken{Token: token}
+
+ rtt, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return nil, 500, err
+ }
+ defer closeResponseBody(resp)
+ wm := &WriteMeta{RequestTime: rtt}
+ if err := requireOK(resp); err != nil {
+ var statusE StatusError
+ if errors.As(err, &statusE) {
+ return wm, statusE.Code, statusE
+ }
+ return nil, 0, err
+ }
+ return wm, resp.StatusCode, nil
+}
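A token-rotation sketch for the update helpers above; the token value is hypothetical, and, as the updateToken comment notes, a non-persisted token must be re-applied after an agent restart:

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Rotate the agent's "default" ACL token at runtime; the value below
	// is a placeholder for a real token secret.
	if _, err := client.Agent().UpdateDefaultACLToken("hypothetical-token", nil); err != nil {
		log.Fatal(err)
	}
}

diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go
new file mode 100644
index 0000000000..f62c0c5a1b
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/api.go
@@ -0,0 +1,1277 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/go-rootcerts"
+)
+
+const (
+ // HTTPAddrEnvName defines an environment variable name which sets
+ // the HTTP address if there is no -http-addr specified.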
+ HTTPAddrEnvName = "CONSUL_HTTP_ADDR" + + // HTTPTokenEnvName defines an environment variable name which sets + // the HTTP token. + HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" + + // HTTPTokenFileEnvName defines an environment variable name which sets + // the HTTP token file. + HTTPTokenFileEnvName = "CONSUL_HTTP_TOKEN_FILE" + + // HTTPAuthEnvName defines an environment variable name which sets + // the HTTP authentication header. + HTTPAuthEnvName = "CONSUL_HTTP_AUTH" + + // HTTPSSLEnvName defines an environment variable name which sets + // whether or not to use HTTPS. + HTTPSSLEnvName = "CONSUL_HTTP_SSL" + + // HTTPCAFile defines an environment variable name which sets the + // CA file to use for talking to Consul over TLS. + HTTPCAFile = "CONSUL_CACERT" + + // HTTPCAPath defines an environment variable name which sets the + // path to a directory of CA certs to use for talking to Consul over TLS. + HTTPCAPath = "CONSUL_CAPATH" + + // HTTPClientCert defines an environment variable name which sets the + // client cert file to use for talking to Consul over TLS. + HTTPClientCert = "CONSUL_CLIENT_CERT" + + // HTTPClientKey defines an environment variable name which sets the + // client key file to use for talking to Consul over TLS. + HTTPClientKey = "CONSUL_CLIENT_KEY" + + // HTTPTLSServerName defines an environment variable name which sets the + // server name to use as the SNI host when connecting via TLS + HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME" + + // HTTPSSLVerifyEnvName defines an environment variable name which sets + // whether or not to disable certificate checking. + HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" + + // GRPCAddrEnvName defines an environment variable name which sets the gRPC + // address for consul connect envoy. Note this isn't actually used by the api + // client in this package but is defined here for consistency with all the + // other ENV names we use. + GRPCAddrEnvName = "CONSUL_GRPC_ADDR" + + // GRPCCAFileEnvName defines an environment variable name which sets the + // CA file to use for talking to Consul gRPC over TLS. + GRPCCAFileEnvName = "CONSUL_GRPC_CACERT" + + // GRPCCAPathEnvName defines an environment variable name which sets the + // path to a directory of CA certs to use for talking to Consul gRPC over TLS. + GRPCCAPathEnvName = "CONSUL_GRPC_CAPATH" + + // HTTPNamespaceEnvVar defines an environment variable name which sets + // the HTTP Namespace to be used by default. This can still be overridden. + HTTPNamespaceEnvName = "CONSUL_NAMESPACE" + + // HTTPPartitionEnvName defines an environment variable name which sets + // the HTTP Partition to be used by default. This can still be overridden. 
+ HTTPPartitionEnvName = "CONSUL_PARTITION"
+
+ // QueryBackendStreaming Query backend of type streaming
+ QueryBackendStreaming = "streaming"
+
+ // QueryBackendBlockingQuery Query backend of type blocking query
+ QueryBackendBlockingQuery = "blocking-query"
+)
+
+type StatusError struct {
+ Code int
+ Body string
+}
+
+func (e StatusError) Error() string {
+ return fmt.Sprintf("Unexpected response code: %d (%s)", e.Code, e.Body)
+}
+
+// QueryOptions are used to parameterize a query
+type QueryOptions struct {
+ // Namespace overrides the `default` namespace
+ // Note: Namespaces are available only in Consul Enterprise
+ Namespace string
+
+ // Partition overrides the `default` partition
+ // Note: Partitions are available only in Consul Enterprise
+ Partition string
+
+ // Providing a datacenter overwrites the DC provided
+ // by the Config
+ Datacenter string
+
+ // Peer is the name of the cluster peer to target for the query, if any
+ Peer string
+
+ // AllowStale allows any Consul server (non-leader) to service
+ // a read. This allows for lower latency and higher throughput.
+ AllowStale bool
+
+ // RequireConsistent forces the read to be fully consistent.
+ // This is more expensive but prevents ever performing a stale
+ // read.
+ RequireConsistent bool
+
+ // UseCache requests that the agent cache results locally. See
+ // https://www.consul.io/api/features/caching.html for more details on the
+ // semantics.
+ UseCache bool
+
+ // MaxAge limits how old a cached value will be returned if UseCache is true.
+ // If there is a cached response that is older than the MaxAge, it is treated
+ // as a cache miss and a new fetch is invoked. If the fetch fails, the error
+ // is returned. Clients that wish to allow for stale results on error can set
+ // StaleIfError to a longer duration to change this behavior. It is ignored
+ // if the endpoint supports background refresh caching. See
+ // https://www.consul.io/api/features/caching.html for more details.
+ MaxAge time.Duration
+
+ // StaleIfError specifies how stale the client will accept a cached response
+ // if the servers are unavailable to fetch a fresh one. Only makes sense when
+ // UseCache is true and MaxAge is set to a lower, non-zero value. It is
+ // ignored if the endpoint supports background refresh caching. See
+ // https://www.consul.io/api/features/caching.html for more details.
+ StaleIfError time.Duration
+
+ // WaitIndex is used to enable a blocking query. Waits
+ // until the timeout or the next index is reached
+ WaitIndex uint64
+
+ // WaitHash is used by some endpoints instead of WaitIndex to perform blocking
+ // on state based on a hash of the response rather than a monotonic index.
+ // This is required when the state being blocked on is not stored in Raft, for
+ // example agent-local proxy configuration.
+ WaitHash string
+
+ // WaitTime is used to bound the duration of a wait.
+ // Defaults to that of the Config, but can be overridden.
+ WaitTime time.Duration
+
+ // Token is used to provide a per-request ACL token
+ // which overrides the agent's default token.
+ Token string
+
+ // Near is used to provide a node name that will sort the results
+ // in ascending order based on the estimated round trip time from
+ // that node. Setting this to "_agent" will use the agent's node
+ // for the sort.
+ Near string
+
+ // NodeMeta is used to filter results by nodes with the given
+ // metadata key/value pairs. Currently, only one key/value pair can
+ // be provided for filtering.
+ NodeMeta map[string]string + + // RelayFactor is used in keyring operations to cause responses to be + // relayed back to the sender through N other random nodes. Must be + // a value from 0 to 5 (inclusive). + RelayFactor uint8 + + // LocalOnly is used in keyring list operation to force the keyring + // query to only hit local servers (no WAN traffic). + LocalOnly bool + + // Connect filters prepared query execution to only include Connect-capable + // services. This currently affects prepared query execution. + Connect bool + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. + ctx context.Context + + // Filter requests filtering data prior to it being returned. The string + // is a go-bexpr compatible expression. + Filter string + + // MergeCentralConfig returns a service definition merged with the + // proxy-defaults/global and service-defaults/:service config entries. + // This can be used to ensure a full service definition is returned in the response + // especially when the service might not be written into the catalog that way. + MergeCentralConfig bool + + // Global is used to request information from all datacenters. Currently only + // used for operator usage requests. + Global bool +} + +func (o *QueryOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions { + o2 := new(QueryOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 +} + +// WriteOptions are used to parameterize a write +type WriteOptions struct { + // Namespace overrides the `default` namespace + // Note: Namespaces are available only in Consul Enterprise + Namespace string + + // Partition overrides the `default` partition + // Note: Partitions are available only in Consul Enterprise + Partition string + + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // RelayFactor is used in keyring operations to cause responses to be + // relayed back to the sender through N other random nodes. Must be + // a value from 0 to 5 (inclusive). + RelayFactor uint8 + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. + ctx context.Context +} + +func (o *WriteOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions { + o2 := new(WriteOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 +} + +// QueryMeta is used to return meta data about a query +type QueryMeta struct { + // LastIndex. This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // LastContentHash. This can be used as a WaitHash to perform a blocking query + // for endpoints that support hash-based blocking. Endpoints that do not + // support it will return an empty hash. 
+ LastContentHash string + + // Time of last contact from the leader for the + // server servicing the request + LastContact time.Duration + + // Is there a known leader + KnownLeader bool + + // How long did the request take + RequestTime time.Duration + + // Is address translation enabled for HTTP responses on this agent + AddressTranslationEnabled bool + + // CacheHit is true if the result was served from agent-local cache. + CacheHit bool + + // CacheAge is set if request was ?cached and indicates how stale the cached + // response is. + CacheAge time.Duration + + // QueryBackend represent which backend served the request. + QueryBackend string + + // DefaultACLPolicy is used to control the ACL interaction when there is no + // defined policy. This can be "allow" which means ACLs are used to + // deny-list, or "deny" which means ACLs are allow-lists. + DefaultACLPolicy string + + // ResultsFilteredByACLs is true when some of the query's results were + // filtered out by enforcing ACLs. It may be false because nothing was + // removed, or because the endpoint does not yet support this flag. + ResultsFilteredByACLs bool +} + +// WriteMeta is used to return meta data about a write +type WriteMeta struct { + // How long did the request take + RequestTime time.Duration +} + +// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication +type HttpBasicAuth struct { + // Username to use for HTTP Basic Authentication + Username string + + // Password to use for HTTP Basic Authentication + Password string +} + +// Config is used to configure the creation of a client +type Config struct { + // Address is the address of the Consul server + Address string + + // Scheme is the URI scheme for the Consul server + Scheme string + + // Prefix for URIs for when consul is behind an API gateway (reverse + // proxy). The API gateway must strip off the PathPrefix before + // passing the request onto consul. + PathPrefix string + + // Datacenter to use. If not provided, the default agent datacenter is used. + Datacenter string + + // Transport is the Transport to use for the http client. + Transport *http.Transport + + // HttpClient is the client to use. Default will be + // used if not provided. + HttpClient *http.Client + + // HttpAuth is the auth info to use for http access. + HttpAuth *HttpBasicAuth + + // WaitTime limits how long a Watch will block. If not provided, + // the agent default values will be used. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // TokenFile is a file containing the current token to use for this client. + // If provided it is read once at startup and never again. + TokenFile string + + // Namespace is the name of the namespace to send along for the request + // when no other Namespace is present in the QueryOptions + Namespace string + + // Partition is the name of the partition to send along for the request + // when no other Partition is present in the QueryOptions + Partition string + + TLSConfig TLSConfig +} + +// TLSConfig is used to generate a TLSClientConfig that's useful for talking to +// Consul using TLS. +type TLSConfig struct { + // Address is the optional address of the Consul server. The port, if any + // will be removed from here and this will be set to the ServerName of the + // resulting config. 
+ Address string + + // CAFile is the optional path to the CA certificate used for Consul + // communication, defaults to the system bundle if not specified. + CAFile string + + // CAPath is the optional path to a directory of CA certificates to use for + // Consul communication, defaults to the system bundle if not specified. + CAPath string + + // CAPem is the optional PEM-encoded CA certificate used for Consul + // communication, defaults to the system bundle if not specified. + CAPem []byte + + // CertFile is the optional path to the certificate for Consul + // communication. If this is set then you need to also set KeyFile. + CertFile string + + // CertPEM is the optional PEM-encoded certificate for Consul + // communication. If this is set then you need to also set KeyPEM. + CertPEM []byte + + // KeyFile is the optional path to the private key for Consul communication. + // If this is set then you need to also set CertFile. + KeyFile string + + // KeyPEM is the optional PEM-encoded private key for Consul communication. + // If this is set then you need to also set CertPEM. + KeyPEM []byte + + // InsecureSkipVerify if set to true will disable TLS host verification. + InsecureSkipVerify bool +} + +// DefaultConfig returns a default configuration for the client. By default this +// will pool and reuse idle connections to Consul. If you have a long-lived +// client object, this is the desired behavior and should make the most efficient +// use of the connections to Consul. If you don't reuse a client object, which +// is not recommended, then you may notice idle connections building up over +// time. To avoid this, use the DefaultNonPooledConfig() instead. +func DefaultConfig() *Config { + return defaultConfig(nil, cleanhttp.DefaultPooledTransport) +} + +// DefaultConfigWithLogger returns a default configuration for the client. It +// is exactly the same as DefaultConfig, but allows for a pre-configured logger +// object to be passed through. +func DefaultConfigWithLogger(logger hclog.Logger) *Config { + return defaultConfig(logger, cleanhttp.DefaultPooledTransport) +} + +// DefaultNonPooledConfig returns a default configuration for the client which +// does not pool connections. This isn't a recommended configuration because it +// will reconnect to Consul on every request, but this is useful to avoid the +// accumulation of idle connections if you make many client objects during the +// lifetime of your application. +func DefaultNonPooledConfig() *Config { + return defaultConfig(nil, cleanhttp.DefaultTransport) +} + +// defaultConfig returns the default configuration for the client, using the +// given function to make the transport. 
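defaultConfig below layers the CONSUL_* environment variables over the built-in defaults; explicit Config fields set afterwards win. A minimal sketch, assuming the canonical import path github.com/hashicorp/consul/api and a hypothetical HTTPS address:

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// DefaultConfig starts from 127.0.0.1:8500 over HTTP and then applies
	// the CONSUL_* environment variables.
	cfg := api.DefaultConfig()
	cfg.Address = "consul.service.internal:8501" // hypothetical address
	cfg.Scheme = "https"

	client, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}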
+func defaultConfig(logger hclog.Logger, transportFn func() *http.Transport) *Config {
+ if logger == nil {
+ logger = hclog.New(&hclog.LoggerOptions{
+ Name: "consul-api",
+ })
+ }
+
+ config := &Config{
+ Address: "127.0.0.1:8500",
+ Scheme: "http",
+ Transport: transportFn(),
+ }
+
+ if addr := os.Getenv(HTTPAddrEnvName); addr != "" {
+ config.Address = addr
+ }
+
+ if tokenFile := os.Getenv(HTTPTokenFileEnvName); tokenFile != "" {
+ config.TokenFile = tokenFile
+ }
+
+ if token := os.Getenv(HTTPTokenEnvName); token != "" {
+ config.Token = token
+ }
+
+ if auth := os.Getenv(HTTPAuthEnvName); auth != "" {
+ var username, password string
+ if strings.Contains(auth, ":") {
+ split := strings.SplitN(auth, ":", 2)
+ username = split[0]
+ password = split[1]
+ } else {
+ username = auth
+ }
+
+ config.HttpAuth = &HttpBasicAuth{
+ Username: username,
+ Password: password,
+ }
+ }
+
+ if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" {
+ enabled, err := strconv.ParseBool(ssl)
+ if err != nil {
+ logger.Warn(fmt.Sprintf("could not parse %s", HTTPSSLEnvName), "error", err)
+ }
+
+ if enabled {
+ config.Scheme = "https"
+ }
+ }
+
+ if v := os.Getenv(HTTPTLSServerName); v != "" {
+ config.TLSConfig.Address = v
+ }
+ if v := os.Getenv(HTTPCAFile); v != "" {
+ config.TLSConfig.CAFile = v
+ }
+ if v := os.Getenv(HTTPCAPath); v != "" {
+ config.TLSConfig.CAPath = v
+ }
+ if v := os.Getenv(HTTPClientCert); v != "" {
+ config.TLSConfig.CertFile = v
+ }
+ if v := os.Getenv(HTTPClientKey); v != "" {
+ config.TLSConfig.KeyFile = v
+ }
+ if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" {
+ doVerify, err := strconv.ParseBool(v)
+ if err != nil {
+ logger.Warn(fmt.Sprintf("could not parse %s", HTTPSSLVerifyEnvName), "error", err)
+ }
+ if !doVerify {
+ config.TLSConfig.InsecureSkipVerify = true
+ }
+ }
+
+ if v := os.Getenv(HTTPNamespaceEnvName); v != "" {
+ config.Namespace = v
+ }
+
+ if v := os.Getenv(HTTPPartitionEnvName); v != "" {
+ config.Partition = v
+ }
+
+ return config
+}
+
+// SetupTLSConfig generates a *tls.Config from the given TLSConfig that is
+// suitable for talking to Consul using TLS.
+func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) { + tlsClientConfig := &tls.Config{ + InsecureSkipVerify: tlsConfig.InsecureSkipVerify, + } + + if tlsConfig.Address != "" { + server := tlsConfig.Address + hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]") + if hasPort { + var err error + server, _, err = net.SplitHostPort(server) + if err != nil { + return nil, err + } + } + tlsClientConfig.ServerName = server + } + + if len(tlsConfig.CertPEM) != 0 && len(tlsConfig.KeyPEM) != 0 { + tlsCert, err := tls.X509KeyPair(tlsConfig.CertPEM, tlsConfig.KeyPEM) + if err != nil { + return nil, err + } + tlsClientConfig.Certificates = []tls.Certificate{tlsCert} + } else if len(tlsConfig.CertPEM) != 0 || len(tlsConfig.KeyPEM) != 0 { + return nil, fmt.Errorf("both client cert and client key must be provided") + } + + if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" { + tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) + if err != nil { + return nil, err + } + tlsClientConfig.Certificates = []tls.Certificate{tlsCert} + } else if tlsConfig.CertFile != "" || tlsConfig.KeyFile != "" { + return nil, fmt.Errorf("both client cert and client key must be provided") + } + + if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" || len(tlsConfig.CAPem) != 0 { + rootConfig := &rootcerts.Config{ + CAFile: tlsConfig.CAFile, + CAPath: tlsConfig.CAPath, + CACertificate: tlsConfig.CAPem, + } + if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil { + return nil, err + } + } + + return tlsClientConfig, nil +} + +func (c *Config) GenerateEnv() []string { + env := make([]string, 0, 10) + + env = append(env, + fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address), + fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token), + fmt.Sprintf("%s=%s", HTTPTokenFileEnvName, c.TokenFile), + fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"), + fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile), + fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath), + fmt.Sprintf("%s=%s", HTTPClientCert, c.TLSConfig.CertFile), + fmt.Sprintf("%s=%s", HTTPClientKey, c.TLSConfig.KeyFile), + fmt.Sprintf("%s=%s", HTTPTLSServerName, c.TLSConfig.Address), + fmt.Sprintf("%s=%t", HTTPSSLVerifyEnvName, !c.TLSConfig.InsecureSkipVerify)) + + if c.HttpAuth != nil { + env = append(env, fmt.Sprintf("%s=%s:%s", HTTPAuthEnvName, c.HttpAuth.Username, c.HttpAuth.Password)) + } else { + env = append(env, fmt.Sprintf("%s=", HTTPAuthEnvName)) + } + + return env +} + +// Client provides a client to the Consul API +type Client struct { + modifyLock sync.RWMutex + headers http.Header + + config Config +} + +// Headers gets the current set of headers used for requests. This returns a +// copy; to modify it call AddHeader or SetHeaders. +func (c *Client) Headers() http.Header { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + if c.headers == nil { + return nil + } + + ret := make(http.Header) + for k, v := range c.headers { + for _, val := range v { + ret[k] = append(ret[k], val) + } + } + + return ret +} + +// AddHeader allows a single header key/value pair to be added +// in a race-safe fashion. +func (c *Client) AddHeader(key, value string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.headers.Add(key, value) +} + +// SetHeaders clears all previous headers and uses only the given +// ones going forward. 
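+//
+// A minimal usage sketch (the header names are hypothetical):
+//
+//	client.SetHeaders(http.Header{"X-Team": {"platform"}})
+//	client.AddHeader("X-Request-Source", "batch-job") // race-safe single addition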
+func (c *Client) SetHeaders(headers http.Header) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.headers = headers +} + +// NewClient returns a new client +func NewClient(config *Config) (*Client, error) { + // bootstrap the config + defConfig := DefaultConfig() + + if config.Address == "" { + config.Address = defConfig.Address + } + + if config.Scheme == "" { + config.Scheme = defConfig.Scheme + } + + if config.Transport == nil { + config.Transport = defConfig.Transport + } + + if config.TLSConfig.Address == "" { + config.TLSConfig.Address = defConfig.TLSConfig.Address + } + + if config.TLSConfig.CAFile == "" { + config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile + } + + if config.TLSConfig.CAPath == "" { + config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath + } + + if config.TLSConfig.CertFile == "" { + config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile + } + + if config.TLSConfig.KeyFile == "" { + config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile + } + + if !config.TLSConfig.InsecureSkipVerify { + config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify + } + + if config.HttpClient == nil { + var err error + config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig) + if err != nil { + return nil, err + } + } + + if config.Namespace == "" { + config.Namespace = defConfig.Namespace + } + + if config.Partition == "" { + config.Partition = defConfig.Partition + } + + parts := strings.SplitN(config.Address, "://", 2) + if len(parts) == 2 { + switch parts[0] { + case "http": + // Never revert to http if TLS was explicitly requested. + case "https": + config.Scheme = "https" + case "unix": + trans := cleanhttp.DefaultTransport() + trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", parts[1]) + } + httpClient, err := NewHttpClient(trans, config.TLSConfig) + if err != nil { + return nil, err + } + config.HttpClient = httpClient + default: + return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) + } + config.Address = parts[1] + + // separate out a reverse proxy prefix, if it is present. + // NOTE: Rewriting this code to use url.Parse() instead of + // strings.SplitN() breaks existing test cases. + switch parts[0] { + case "http", "https": + parts := strings.SplitN(parts[1], "/", 2) + if len(parts) == 2 { + config.Address = parts[0] + config.PathPrefix = "/" + parts[1] + } + } + } + + // If the TokenFile is set, always use that, even if a Token is configured. + // This is because when TokenFile is set it is read into the Token field. + // We want any derived clients to have to re-read the token file. + // The precedence of ACL token should be: + // 1. -token-file cli option + // 2. -token cli option + // 3. CONSUL_HTTP_TOKEN_FILE environment variable + // 4. 
CONSUL_HTTP_TOKEN environment variable + if config.TokenFile != "" && config.TokenFile != defConfig.TokenFile { + data, err := os.ReadFile(config.TokenFile) + if err != nil { + return nil, fmt.Errorf("Error loading token file %s : %s", config.TokenFile, err) + } + + if token := strings.TrimSpace(string(data)); token != "" { + config.Token = token + } + } else if config.Token != "" && defConfig.Token != config.Token { + // Fall through + } else if defConfig.TokenFile != "" { + data, err := os.ReadFile(defConfig.TokenFile) + if err != nil { + return nil, fmt.Errorf("Error loading token file %s : %s", defConfig.TokenFile, err) + } + + if token := strings.TrimSpace(string(data)); token != "" { + config.Token = token + config.TokenFile = defConfig.TokenFile + } + } else { + config.Token = defConfig.Token + } + return &Client{config: *config, headers: make(http.Header)}, nil +} + +// NewHttpClient returns an http client configured with the given Transport and TLS +// config. +func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) { + client := &http.Client{ + Transport: transport, + } + + // TODO (slackpad) - Once we get some run time on the HTTP/2 support we + // should turn it on by default if TLS is enabled. We would basically + // just need to call http2.ConfigureTransport(transport) here. We also + // don't want to introduce another external dependency on + // golang.org/x/net/http2 at this time. For a complete recipe for how + // to enable HTTP/2 support on a transport suitable for the API client + // library see agent/http_test.go:TestHTTPServer_H2. + + if transport.TLSClientConfig == nil { + tlsClientConfig, err := SetupTLSConfig(&tlsConf) + + if err != nil { + return nil, err + } + + transport.TLSClientConfig = tlsClientConfig + } + + return client, nil +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + body io.Reader + header http.Header + obj interface{} + ctx context.Context +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Namespace != "" { + // For backwards-compatibility with existing tests, + // use the short-hand query param name "ns" + // rather than the alternative long-hand "namespace" + r.params.Set("ns", q.Namespace) + } + if q.Partition != "" { + // For backwards-compatibility with existing tests, + // use the long-hand query param name "partition" + // rather than the alternative short-hand "ap" + r.params.Set("partition", q.Partition) + } + if q.Datacenter != "" { + // For backwards-compatibility with existing tests, + // use the short-hand query param name "dc" + // rather than the alternative long-hand "datacenter" + r.params.Set("dc", q.Datacenter) + } + if q.Peer != "" { + r.params.Set("peer", q.Peer) + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.RequireConsistent { + r.params.Set("consistent", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.WaitHash != "" { + r.params.Set("hash", q.WaitHash) + } + if q.Token != "" { + r.header.Set("X-Consul-Token", q.Token) + } + if q.Near != "" { + r.params.Set("near", q.Near) + } + if q.Filter != "" { + r.params.Set("filter", q.Filter) + } + if len(q.NodeMeta) > 0 { + for key, value := range q.NodeMeta { + r.params.Add("node-meta", 
key+":"+value)
+		}
+	}
+	if q.RelayFactor != 0 {
+		r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
+	}
+	if q.LocalOnly {
+		r.params.Set("local-only", fmt.Sprintf("%t", q.LocalOnly))
+	}
+	if q.Connect {
+		r.params.Set("connect", "true")
+	}
+	if q.UseCache && !q.RequireConsistent {
+		r.params.Set("cached", "")
+
+		cc := []string{}
+		if q.MaxAge > 0 {
+			cc = append(cc, fmt.Sprintf("max-age=%.0f", q.MaxAge.Seconds()))
+		}
+		if q.StaleIfError > 0 {
+			cc = append(cc, fmt.Sprintf("stale-if-error=%.0f", q.StaleIfError.Seconds()))
+		}
+		if len(cc) > 0 {
+			r.header.Set("Cache-Control", strings.Join(cc, ", "))
+		}
+	}
+	if q.MergeCentralConfig {
+		r.params.Set("merge-central-config", "")
+	}
+	if q.Global {
+		r.params.Set("global", "")
+	}
+
+	r.ctx = q.ctx
+}
+
+// durToMsec converts a duration to a millisecond specified string. If the
+// user selected a positive value that rounds to 0 ms, then we will use 1 ms
+// so they get a short delay, otherwise Consul will translate the 0 ms into
+// a huge default delay.
+func durToMsec(dur time.Duration) string {
+	ms := dur / time.Millisecond
+	if dur > 0 && ms == 0 {
+		ms = 1
+	}
+	return fmt.Sprintf("%dms", ms)
+}
+
+// serverError is a string we look for to detect 500 errors.
+const serverError = "Unexpected response code: 500"
+
+// IsRetryableError returns true for 500 errors from the Consul servers, and
+// network connection errors. These are usually retryable at a later time.
+// This applies to reads but NOT to writes. This may return true for errors
+// on writes that may have still gone through, so do not use this to retry
+// any write operations.
+func IsRetryableError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	if _, ok := err.(net.Error); ok {
+		return true
+	}
+
+	// TODO (slackpad) - Make a real error type here instead of using
+	// a string check.
+	return strings.Contains(err.Error(), serverError)
+}
+
+// setWriteOptions is used to annotate the request with
+// additional write options
+func (r *request) setWriteOptions(q *WriteOptions) {
+	if q == nil {
+		return
+	}
+	// For backwards-compatibility, continue to use the shorthand "ns"
+	// rather than "namespace"
+	if q.Namespace != "" {
+		r.params.Set("ns", q.Namespace)
+	}
+	if q.Partition != "" {
+		r.params.Set("partition", q.Partition)
+	}
+	// For backwards-compatibility, continue to use the shorthand "dc"
+	// rather than "datacenter"
+	if q.Datacenter != "" {
+		r.params.Set("dc", q.Datacenter)
+	}
+	if q.Token != "" {
+		r.header.Set("X-Consul-Token", q.Token)
+	}
+	if q.RelayFactor != 0 {
+		r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
+	}
+	r.ctx = q.ctx
+}
+
+// toHTTP converts the request to an HTTP request
+func (r *request) toHTTP() (*http.Request, error) {
+	// Encode the query parameters
+	r.url.RawQuery = r.params.Encode()
+
+	// Check if we should encode the body
+	if r.body == nil && r.obj != nil {
+		b, err := encodeBody(r.obj)
+		if err != nil {
+			return nil, err
+		}
+		r.body = b
+	}
+
+	// Create the HTTP request
+	req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
+	if err != nil {
+		return nil, err
+	}
+
+	// For Unix socket communications the host portion of the URL contains
+	// slashes; detect that case and replace the host with "localhost".
+	// This is required since Go started validating req.Host in 1.20.6 and
+	// 1.19.11; before that, the slashes were stripped automatically. That
+	// behavior was removed and stricter validation was added as part of a CVE fix.
+ // This issue is being tracked by the Go team: + // https://github.com/golang/go/issues/61431 + // If there is a resolution in this issue, we will remove this code. + // In the time being, this is the accepted workaround. + if strings.HasPrefix(r.url.Host, "/") { + r.url.Host = "localhost" + } + + req.URL.Host = r.url.Host + req.URL.Scheme = r.url.Scheme + req.Host = r.url.Host + req.Header = r.header + + // Content-Type must always be set when a body is present + // See https://github.com/hashicorp/consul/issues/10011 + if req.Body != nil && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "application/json") + } + + // Setup auth + if r.config.HttpAuth != nil { + req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) + } + if r.ctx != nil { + return req.WithContext(r.ctx), nil + } + + return req, nil +} + +// newRequest is used to create a new request +func (c *Client) newRequest(method, path string) *request { + r := &request{ + config: &c.config, + method: method, + url: &url.URL{ + Scheme: c.config.Scheme, + Host: c.config.Address, + Path: c.config.PathPrefix + path, + }, + params: make(map[string][]string), + header: c.Headers(), + } + + if c.config.Datacenter != "" { + r.params.Set("dc", c.config.Datacenter) + } + if c.config.Namespace != "" { + r.params.Set("ns", c.config.Namespace) + } + if c.config.Partition != "" { + r.params.Set("partition", c.config.Partition) + } + if c.config.WaitTime != 0 { + r.params.Set("wait", durToMsec(r.config.WaitTime)) + } + if c.config.Token != "" { + r.header.Set("X-Consul-Token", r.config.Token) + } + return r +} + +// doRequest runs a request with our client +func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { + req, err := r.toHTTP() + if err != nil { + return 0, nil, err + } + start := time.Now() + resp, err := c.config.HttpClient.Do(req) + diff := time.Since(start) + return diff, resp, err +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions. +func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + r := c.newRequest("GET", endpoint) + r.setQueryOptions(q) + rtt, resp, err := c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, out); err != nil { + return nil, err + } + return qm, nil +} + +// write is used to do a PUT request against an endpoint +// and serialize/deserialized using the standard Consul conventions. +func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r := c.newRequest("PUT", endpoint) + r.setWriteOptions(q) + r.obj = in + rtt, resp, err := c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } else if _, err := io.ReadAll(resp.Body); err != nil { + return nil, err + } + return wm, nil +} + +// parseQueryMeta is used to help parse query meta-data +// +// TODO(rb): bug? 
the error from this function is never handled +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Consul-Index (if it's set - hash based blocking queries don't + // set this) + if indexStr := header.Get("X-Consul-Index"); indexStr != "" { + index, err := strconv.ParseUint(indexStr, 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + } + q.LastIndex = index + } + q.LastContentHash = header.Get("X-Consul-ContentHash") + + // Parse the X-Consul-LastContact + last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Consul-KnownLeader + switch header.Get("X-Consul-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + + // Parse X-Consul-Translate-Addresses + switch header.Get("X-Consul-Translate-Addresses") { + case "true": + q.AddressTranslationEnabled = true + default: + q.AddressTranslationEnabled = false + } + + // Parse X-Consul-Default-ACL-Policy + switch v := header.Get("X-Consul-Default-ACL-Policy"); v { + case "allow", "deny": + q.DefaultACLPolicy = v + } + + // Parse the X-Consul-Results-Filtered-By-ACLs + switch header.Get("X-Consul-Results-Filtered-By-ACLs") { + case "true": + q.ResultsFilteredByACLs = true + default: + q.ResultsFilteredByACLs = false + } + + // Parse Cache info + if cacheStr := header.Get("X-Cache"); cacheStr != "" { + q.CacheHit = strings.EqualFold(cacheStr, "HIT") + } + if ageStr := header.Get("Age"); ageStr != "" { + age, err := strconv.ParseUint(ageStr, 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse Age Header: %v", err) + } + q.CacheAge = time.Duration(age) * time.Second + } + + switch v := header.Get("X-Consul-Query-Backend"); v { + case QueryBackendStreaming, QueryBackendBlockingQuery: + q.QueryBackend = v + } + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(resp *http.Response) error { + return requireHttpCodes(resp, 200) +} + +// requireHttpCodes checks for the "allowable" http codes for a response +func requireHttpCodes(resp *http.Response, httpCodes ...int) error { + // if there is an http code that we require, return w no error + for _, httpCode := range httpCodes { + if resp.StatusCode == httpCode { + return nil + } + } + + // if we reached here, then none of the http codes in resp matched any that we expected + // so err out + return generateUnexpectedResponseCodeError(resp) +} + +// closeResponseBody reads resp.Body until EOF, and then closes it. The read +// is necessary to ensure that the http.Client's underlying RoundTripper is able +// to re-use the TCP connection. See godoc on net/http.Client.Do. 
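+//
+// A minimal sketch of the intended call pattern (the endpoint is illustrative):
+//
+//	resp, err := http.Get("http://127.0.0.1:8500/v1/status/leader")
+//	if err != nil {
+//		return err
+//	}
+//	defer closeResponseBody(resp) // drain the body so the connection is reused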
+func closeResponseBody(resp *http.Response) error { + _, _ = io.Copy(io.Discard, resp.Body) + return resp.Body.Close() +} + +func (req *request) filterQuery(filter string) { + if filter == "" { + return + } + + req.params.Set("filter", filter) +} + +// generateUnexpectedResponseCodeError consumes the rest of the body, closes +// the body stream and generates an error indicating the status code was +// unexpected. +func generateUnexpectedResponseCodeError(resp *http.Response) error { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + closeResponseBody(resp) + + trimmed := strings.TrimSpace(string(buf.Bytes())) + return StatusError{Code: resp.StatusCode, Body: trimmed} +} + +func requireNotFoundOrOK(resp *http.Response) (bool, *http.Response, error) { + switch resp.StatusCode { + case 200: + return true, resp, nil + case 404: + return false, resp, nil + default: + return false, nil, generateUnexpectedResponseCodeError(resp) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go new file mode 100644 index 0000000000..0040ca6e7a --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -0,0 +1,377 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "net" + "strconv" +) + +type Weights struct { + Passing int + Warning int +} + +type Node struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + Meta map[string]string + CreateIndex uint64 + ModifyIndex uint64 + Partition string `json:",omitempty"` + PeerName string `json:",omitempty"` + Locality *Locality `json:",omitempty"` +} + +type ServiceAddress struct { + Address string + Port int +} + +type CatalogService struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + NodeMeta map[string]string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTaggedAddresses map[string]ServiceAddress + ServiceTags []string + ServiceMeta map[string]string + ServicePort int + ServiceWeights Weights + ServiceEnableTagOverride bool + ServiceProxy *AgentServiceConnectProxyConfig + ServiceLocality *Locality `json:",omitempty"` + CreateIndex uint64 + Checks HealthChecks + ModifyIndex uint64 + Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` +} + +type CatalogNode struct { + Node *Node + Services map[string]*AgentService +} + +type CatalogNodeServiceList struct { + Node *Node + Services []*AgentService +} + +type CatalogRegistration struct { + ID string + Node string + Address string + TaggedAddresses map[string]string + NodeMeta map[string]string + Datacenter string + Service *AgentService + Check *AgentCheck + Checks HealthChecks + SkipNodeUpdate bool + Partition string `json:",omitempty"` + Locality *Locality `json:",omitempty"` +} + +type CatalogDeregistration struct { + Node string + Address string `json:",omitempty"` // Obsolete. + Datacenter string + ServiceID string + CheckID string + Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` +} + +type CompoundServiceName struct { + Name string + + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + // Partitions are a Consul Enterprise feature. + Partition string `json:",omitempty"` +} + +// GatewayService associates a gateway with a linked service. +// It also contains service-specific gateway configuration like ingress listener port and protocol. 
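+//
+// For illustration (all field values hypothetical), a terminating-gateway
+// association might decode as:
+//
+//	GatewayService{
+//		Gateway:     CompoundServiceName{Name: "my-terminating-gw"},
+//		Service:     CompoundServiceName{Name: "billing"},
+//		GatewayKind: ServiceKindTerminatingGateway,
+//	}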
+type GatewayService struct { + Gateway CompoundServiceName + Service CompoundServiceName + GatewayKind ServiceKind + Port int `json:",omitempty"` + Protocol string `json:",omitempty"` + Hosts []string `json:",omitempty"` + CAFile string `json:",omitempty"` + CertFile string `json:",omitempty"` + KeyFile string `json:",omitempty"` + SNI string `json:",omitempty"` + FromWildcard bool `json:",omitempty"` +} + +// Catalog can be used to query the Catalog endpoints +type Catalog struct { + c *Client +} + +// Catalog returns a handle to the catalog endpoints +func (c *Client) Catalog() *Catalog { + return &Catalog{c} +} + +func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/register") + r.setWriteOptions(q) + r.obj = reg + rtt, resp, err := c.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/deregister") + r.setWriteOptions(q) + r.obj = dereg + rtt, resp, err := c.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +// Datacenters is used to query for all the known datacenters +func (c *Catalog) Datacenters() ([]string, error) { + r := c.c.newRequest("GET", "/v1/catalog/datacenters") + _, resp, err := c.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + var out []string + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to query all the known nodes +func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/nodes") + r.setQueryOptions(q) + rtt, resp, err := c.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Node + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Services is used to query for all known services +func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/services") + r.setQueryOptions(q) + rtt, resp, err := c.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out map[string][]string + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query catalog entries for a given service +func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + var tags []string + if tag != "" { + tags = []string{tag} + } + return c.service(service, tags, q, false) +} + +// Supports multiple tags for filtering +func (c *Catalog) ServiceMultipleTags(service string, tags []string, q *QueryOptions) 
([]*CatalogService, *QueryMeta, error) { + return c.service(service, tags, q, false) +} + +// Connect is used to query catalog entries for a given Connect-enabled service +func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + var tags []string + if tag != "" { + tags = []string{tag} + } + return c.service(service, tags, q, true) +} + +// Supports multiple tags for filtering +func (c *Catalog) ConnectMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + return c.service(service, tags, q, true) +} + +func (c *Catalog) service(service string, tags []string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) { + path := "/v1/catalog/service/" + service + if connect { + path = "/v1/catalog/connect/" + service + } + r := c.c.newRequest("GET", path) + r.setQueryOptions(q) + if len(tags) > 0 { + for _, tag := range tags { + r.params.Add("tag", tag) + } + } + rtt, resp, err := c.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CatalogService + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Node is used to query for service information about a single node +func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := c.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out *CatalogNode + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// NodeServiceList is used to query for service information about a single node. It differs from +// the Node function only in its return type which will contain a list of services as opposed to +// a map of service ids to services. This different structure allows for using the wildcard specifier +// '*' for the Namespace in the QueryOptions. +func (c *Catalog) NodeServiceList(node string, q *QueryOptions) (*CatalogNodeServiceList, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/node-services/"+node) + r.setQueryOptions(q) + rtt, resp, err := c.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out *CatalogNodeServiceList + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// GatewayServices is used to query the services associated with an ingress gateway or terminating gateway. 
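+//
+// Sketch of a typical call (the gateway name is hypothetical):
+//
+//	gwSvcs, _, err := client.Catalog().GatewayServices("my-ingress-gw", nil)
+//	if err != nil {
+//		return err
+//	}
+//	for _, gs := range gwSvcs {
+//		fmt.Printf("%s -> %s:%d\n", gs.Gateway.Name, gs.Service.Name, gs.Port)
+//	}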
+func (c *Catalog) GatewayServices(gateway string, q *QueryOptions) ([]*GatewayService, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/gateway-services/"+gateway) + r.setQueryOptions(q) + rtt, resp, err := c.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*GatewayService + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +func ParseServiceAddr(addrPort string) (ServiceAddress, error) { + port := 0 + host, portStr, err := net.SplitHostPort(addrPort) + if err == nil { + port, err = strconv.Atoi(portStr) + } + return ServiceAddress{Address: host, Port: port}, err +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go new file mode 100644 index 0000000000..b59c20fd30 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -0,0 +1,687 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" + "time" + + "github.com/mitchellh/mapstructure" +) + +const ( + ServiceDefaults string = "service-defaults" + ProxyDefaults string = "proxy-defaults" + ServiceRouter string = "service-router" + ServiceSplitter string = "service-splitter" + ServiceResolver string = "service-resolver" + IngressGateway string = "ingress-gateway" + TerminatingGateway string = "terminating-gateway" + ServiceIntentions string = "service-intentions" + MeshConfig string = "mesh" + ExportedServices string = "exported-services" + SamenessGroup string = "sameness-group" + RateLimitIPConfig string = "control-plane-request-limit" + + ProxyConfigGlobal string = "global" + MeshConfigMesh string = "mesh" + APIGateway string = "api-gateway" + TCPRoute string = "tcp-route" + InlineCertificate string = "inline-certificate" + HTTPRoute string = "http-route" + JWTProvider string = "jwt-provider" +) + +const ( + BuiltinAWSLambdaExtension string = "builtin/aws/lambda" + BuiltinExtAuthzExtension string = "builtin/ext-authz" + BuiltinLuaExtension string = "builtin/lua" + BuiltinOTELAccessLoggingExtension string = "builtin/otel-access-logging" + BuiltinPropertyOverrideExtension string = "builtin/property-override" + BuiltinWasmExtension string = "builtin/wasm" + // BuiltinValidateExtension should not be exposed directly or accepted as a valid configured + // extension type, as it is only used indirectly via troubleshooting tools. It is included here + // for common reference alongside other builtin extensions. + BuiltinValidateExtension string = "builtin/proxy/validate" +) + +type ConfigEntry interface { + GetKind() string + GetName() string + GetPartition() string + GetNamespace() string + GetMeta() map[string]string + GetCreateIndex() uint64 + GetModifyIndex() uint64 +} + +type MeshGatewayMode string + +const ( + // MeshGatewayModeDefault represents no specific mode and should + // be used to indicate that a different layer of the configuration + // chain should take precedence + MeshGatewayModeDefault MeshGatewayMode = "" + + // MeshGatewayModeNone represents that the Upstream Connect connections + // should be direct and not flow through a mesh gateway. 
+ MeshGatewayModeNone MeshGatewayMode = "none" + + // MeshGatewayModeLocal represents that the Upstream Connect connections + // should be made to a mesh gateway in the local datacenter. + MeshGatewayModeLocal MeshGatewayMode = "local" + + // MeshGatewayModeRemote represents that the Upstream Connect connections + // should be made to a mesh gateway in a remote datacenter. + MeshGatewayModeRemote MeshGatewayMode = "remote" +) + +// MeshGatewayConfig controls how Mesh Gateways are used for upstream Connect +// services +type MeshGatewayConfig struct { + // Mode is the mode that should be used for the upstream connection. + Mode MeshGatewayMode `json:",omitempty"` +} + +type ProxyMode string + +const ( + // ProxyModeDefault represents no specific mode and should + // be used to indicate that a different layer of the configuration + // chain should take precedence + ProxyModeDefault ProxyMode = "" + + // ProxyModeTransparent represents that inbound and outbound application + // traffic is being captured and redirected through the proxy. + ProxyModeTransparent ProxyMode = "transparent" + + // ProxyModeDirect represents that the proxy's listeners must be dialed directly + // by the local application and other proxies. + ProxyModeDirect ProxyMode = "direct" +) + +type TransparentProxyConfig struct { + // The port of the listener where outbound application traffic is being redirected to. + OutboundListenerPort int `json:",omitempty" alias:"outbound_listener_port"` + + // DialedDirectly indicates whether transparent proxies can dial this proxy instance directly. + // The discovery chain is not considered when dialing a service instance directly. + // This setting is useful when addressing stateful services, such as a database cluster with a leader node. + DialedDirectly bool `json:",omitempty" alias:"dialed_directly"` +} + +type MutualTLSMode string + +const ( + // MutualTLSModeDefault represents no specific mode and should + // be used to indicate that a different layer of the configuration + // chain should take precedence. + MutualTLSModeDefault MutualTLSMode = "" + + // MutualTLSModeStrict requires mTLS for incoming traffic. + MutualTLSModeStrict MutualTLSMode = "strict" + + // MutualTLSModePermissive allows incoming non-mTLS traffic. + MutualTLSModePermissive MutualTLSMode = "permissive" +) + +// ExposeConfig describes HTTP paths to expose through Envoy outside of Connect. +// Users can expose individual paths and/or all HTTP/GRPC paths for checks. +type ExposeConfig struct { + // Checks defines whether paths associated with Consul checks will be exposed. + // This flag triggers exposing all HTTP and GRPC check paths registered for the service. + Checks bool `json:",omitempty"` + + // Paths is the list of paths exposed through the proxy. + Paths []ExposePath `json:",omitempty"` +} + +// EnvoyExtension has configuration for an extension that patches Envoy resources. +type EnvoyExtension struct { + Name string + Required bool + Arguments map[string]interface{} `bexpr:"-"` + ConsulVersion string + EnvoyVersion string +} + +type ExposePath struct { + // ListenerPort defines the port of the proxy's listener for exposed paths. + ListenerPort int `json:",omitempty" alias:"listener_port"` + + // Path is the path to expose through the proxy, ie. "/metrics." + Path string `json:",omitempty"` + + // LocalPathPort is the port that the service is listening on for the given path. + LocalPathPort int `json:",omitempty" alias:"local_path_port"` + + // Protocol describes the upstream's service protocol. 
+ // Valid values are "http" and "http2", defaults to "http" + Protocol string `json:",omitempty"` + + // ParsedFromCheck is set if this path was parsed from a registered check + ParsedFromCheck bool +} + +type LogSinkType string + +const ( + DefaultLogSinkType LogSinkType = "" + FileLogSinkType LogSinkType = "file" + StdErrLogSinkType LogSinkType = "stderr" + StdOutLogSinkType LogSinkType = "stdout" +) + +// AccessLogsConfig contains the associated default settings for all Envoy instances within the datacenter or partition +type AccessLogsConfig struct { + // Enabled turns off all access logging + Enabled bool `json:",omitempty" alias:"enabled"` + + // DisableListenerLogs turns off just listener logs for connections rejected by Envoy because they don't + // have a matching listener filter. + DisableListenerLogs bool `json:",omitempty" alias:"disable_listener_logs"` + + // Type selects the output for logs: "file", "stderr". "stdout" + Type LogSinkType `json:",omitempty" alias:"type"` + + // Path is the output file to write logs + Path string `json:",omitempty" alias:"path"` + + // The presence of one format string or the other implies the access log string encoding. + // Defining Both is invalid. + JSONFormat string `json:",omitempty" alias:"json_format"` + TextFormat string `json:",omitempty" alias:"text_format"` +} + +type UpstreamConfiguration struct { + // Overrides is a slice of per-service configuration. The name field is + // required. + Overrides []*UpstreamConfig `json:",omitempty"` + + // Defaults contains default configuration for all upstreams of a given + // service. The name field must be empty. + Defaults *UpstreamConfig `json:",omitempty"` +} + +type UpstreamConfig struct { + // Name is only accepted within service-defaults.upstreamConfig.overrides . + Name string `json:",omitempty"` + + // Partition is only accepted within service-defaults.upstreamConfig.overrides . + Partition string `json:",omitempty"` + + // Namespace is only accepted within service-defaults.upstreamConfig.overrides . + Namespace string `json:",omitempty"` + + // Peer is only accepted within service-defaults.upstreamConfig.overrides . + Peer string `json:",omitempty"` + + // EnvoyListenerJSON is a complete override ("escape hatch") for the upstream's + // listener. + // + // Note: This escape hatch is NOT compatible with the discovery chain and + // will be ignored if a discovery chain is active. + EnvoyListenerJSON string `json:",omitempty" alias:"envoy_listener_json"` + + // EnvoyClusterJSON is a complete override ("escape hatch") for the upstream's + // cluster. The Connect client TLS certificate and context will be injected + // overriding any TLS settings present. + // + // Note: This escape hatch is NOT compatible with the discovery chain and + // will be ignored if a discovery chain is active. + EnvoyClusterJSON string `json:",omitempty" alias:"envoy_cluster_json"` + + // Protocol describes the upstream's service protocol. Valid values are "tcp", + // "http" and "grpc". Anything else is treated as tcp. The enables protocol + // aware features like per-request metrics and connection pooling, tracing, + // routing etc. + Protocol string `json:",omitempty"` + + // ConnectTimeoutMs is the number of milliseconds to timeout making a new + // connection to this upstream. Defaults to 5000 (5 seconds) if not set. + ConnectTimeoutMs int `json:",omitempty" alias:"connect_timeout_ms"` + + // Limits are the set of limits that are applied to the proxy for a specific upstream of a + // service instance. 
+ Limits *UpstreamLimits `json:",omitempty"` + + // PassiveHealthCheck configuration determines how upstream proxy instances will + // be monitored for removal from the load balancing pool. + PassiveHealthCheck *PassiveHealthCheck `json:",omitempty" alias:"passive_health_check"` + + // MeshGatewayConfig controls how Mesh Gateways are configured and used + MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway" ` + + // BalanceOutboundConnections indicates that the proxy should attempt to evenly distribute + // outbound connections across worker threads. Only used by envoy proxies. + BalanceOutboundConnections string `json:",omitempty" alias:"balance_outbound_connections"` +} + +// DestinationConfig represents a virtual service, i.e. one that is external to Consul +type DestinationConfig struct { + // Addresses of the endpoint; hostname or IP + Addresses []string `json:",omitempty"` + + // Port allowed within this endpoint + Port int `json:",omitempty"` +} + +type PassiveHealthCheck struct { + // Interval between health check analysis sweeps. Each sweep may remove + // hosts or return hosts to the pool. + Interval time.Duration `json:",omitempty"` + + // MaxFailures is the count of consecutive failures that results in a host + // being removed from the pool. + MaxFailures uint32 `alias:"max_failures"` + + // EnforcingConsecutive5xx is the % chance that a host will be actually ejected + // when an outlier status is detected through consecutive 5xx. + // This setting can be used to disable ejection or to ramp it up slowly. + EnforcingConsecutive5xx *uint32 `json:",omitempty" alias:"enforcing_consecutive_5xx"` + + // The maximum % of an upstream cluster that can be ejected due to outlier detection. + // Defaults to 10% but will eject at least one host regardless of the value. + MaxEjectionPercent *uint32 `json:",omitempty" alias:"max_ejection_percent"` + + // The base time that a host is ejected for. The real time is equal to the base time + // multiplied by the number of times the host has been ejected and is capped by + // max_ejection_time (Default 300s). Defaults to 30000ms or 30s. + BaseEjectionTime *time.Duration `json:",omitempty" alias:"base_ejection_time"` +} + +// UpstreamLimits describes the limits that are associated with a specific +// upstream of a service instance. +type UpstreamLimits struct { + // MaxConnections is the maximum number of connections the local proxy can + // make to the upstream service. + MaxConnections *int `alias:"max_connections"` + + // MaxPendingRequests is the maximum number of requests that will be queued + // waiting for an available connection. This is mostly applicable to HTTP/1.1 + // clusters since all HTTP/2 requests are streamed over a single + // connection. + MaxPendingRequests *int `alias:"max_pending_requests"` + + // MaxConcurrentRequests is the maximum number of in-flight requests that will be allowed + // to the upstream cluster at a point in time. This is mostly applicable to HTTP/2 + // clusters since all HTTP/1.1 requests are limited by MaxConnections. + MaxConcurrentRequests *int `alias:"max_concurrent_requests"` +} + +// RateLimits is rate limiting configuration that is applied to +// inbound traffic for a service. +// Rate limiting is a Consul enterprise feature. +type RateLimits struct { + InstanceLevel InstanceLevelRateLimits `alias:"instance_level"` +} + +// InstanceLevelRateLimits represents rate limit configuration +// that are applied per service instance. 
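+//
+// Illustrative values (hypothetical): an average of 100 requests per second
+// with bursts of up to 250 requests:
+//
+//	RateLimits{
+//		InstanceLevel: InstanceLevelRateLimits{
+//			RequestsPerSecond: 100,
+//			RequestsMaxBurst:  250,
+//		},
+//	}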
+type InstanceLevelRateLimits struct { + // RequestsPerSecond is the average number of requests per second that can be + // made without being throttled. This field is required if RequestsMaxBurst + // is set. The allowed number of requests may exceed RequestsPerSecond up to + // the value specified in RequestsMaxBurst. + // + // Internally, this is the refill rate of the token bucket used for rate limiting. + RequestsPerSecond int `alias:"requests_per_second"` + + // RequestsMaxBurst is the maximum number of requests that can be sent + // in a burst. Should be equal to or greater than RequestsPerSecond. + // If unset, defaults to RequestsPerSecond. + // + // Internally, this is the maximum size of the token bucket used for rate limiting. + RequestsMaxBurst int `alias:"requests_max_burst"` + + // Routes is a list of rate limits applied to specific routes. + // Overrides any top-level configuration. + Routes []InstanceLevelRouteRateLimits +} + +// InstanceLevelRouteRateLimits represents rate limit configuration +// applied to a route matching one of PathExact/PathPrefix/PathRegex. +type InstanceLevelRouteRateLimits struct { + PathExact string `alias:"path_exact"` + PathPrefix string `alias:"path_prefix"` + PathRegex string `alias:"path_regex"` + + RequestsPerSecond int `alias:"requests_per_second"` + RequestsMaxBurst int `alias:"requests_max_burst"` +} + +type ServiceConfigEntry struct { + Kind string + Name string + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + Protocol string `json:",omitempty"` + Mode ProxyMode `json:",omitempty"` + TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` + MutualTLSMode MutualTLSMode `json:",omitempty" alias:"mutual_tls_mode"` + MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` + Expose ExposeConfig `json:",omitempty"` + ExternalSNI string `json:",omitempty" alias:"external_sni"` + UpstreamConfig *UpstreamConfiguration `json:",omitempty" alias:"upstream_config"` + Destination *DestinationConfig `json:",omitempty"` + MaxInboundConnections int `json:",omitempty" alias:"max_inbound_connections"` + LocalConnectTimeoutMs int `json:",omitempty" alias:"local_connect_timeout_ms"` + LocalRequestTimeoutMs int `json:",omitempty" alias:"local_request_timeout_ms"` + BalanceInboundConnections string `json:",omitempty" alias:"balance_inbound_connections"` + RateLimits *RateLimits `json:",omitempty" alias:"rate_limits"` + EnvoyExtensions []EnvoyExtension `json:",omitempty" alias:"envoy_extensions"` + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 +} + +func (s *ServiceConfigEntry) GetKind() string { return s.Kind } +func (s *ServiceConfigEntry) GetName() string { return s.Name } +func (s *ServiceConfigEntry) GetPartition() string { return s.Partition } +func (s *ServiceConfigEntry) GetNamespace() string { return s.Namespace } +func (s *ServiceConfigEntry) GetMeta() map[string]string { return s.Meta } +func (s *ServiceConfigEntry) GetCreateIndex() uint64 { return s.CreateIndex } +func (s *ServiceConfigEntry) GetModifyIndex() uint64 { return s.ModifyIndex } + +type ProxyConfigEntry struct { + Kind string + Name string + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + Mode ProxyMode `json:",omitempty"` + TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` + MutualTLSMode MutualTLSMode `json:",omitempty" alias:"mutual_tls_mode"` + Config map[string]interface{} `json:",omitempty"` + 
MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` + Expose ExposeConfig `json:",omitempty"` + AccessLogs *AccessLogsConfig `json:",omitempty" alias:"access_logs"` + EnvoyExtensions []EnvoyExtension `json:",omitempty" alias:"envoy_extensions"` + FailoverPolicy *ServiceResolverFailoverPolicy `json:",omitempty" alias:"failover_policy"` + PrioritizeByLocality *ServiceResolverPrioritizeByLocality `json:",omitempty" alias:"prioritize_by_locality"` + + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 +} + +func (p *ProxyConfigEntry) GetKind() string { return p.Kind } +func (p *ProxyConfigEntry) GetName() string { return ProxyConfigGlobal } +func (p *ProxyConfigEntry) GetPartition() string { return p.Partition } +func (p *ProxyConfigEntry) GetNamespace() string { return p.Namespace } +func (p *ProxyConfigEntry) GetMeta() map[string]string { return p.Meta } +func (p *ProxyConfigEntry) GetCreateIndex() uint64 { return p.CreateIndex } +func (p *ProxyConfigEntry) GetModifyIndex() uint64 { return p.ModifyIndex } + +func makeConfigEntry(kind, name string) (ConfigEntry, error) { + switch kind { + case ServiceDefaults: + return &ServiceConfigEntry{Kind: kind, Name: name}, nil + case ProxyDefaults: + return &ProxyConfigEntry{Kind: kind, Name: name}, nil + case ServiceRouter: + return &ServiceRouterConfigEntry{Kind: kind, Name: name}, nil + case ServiceSplitter: + return &ServiceSplitterConfigEntry{Kind: kind, Name: name}, nil + case ServiceResolver: + return &ServiceResolverConfigEntry{Kind: kind, Name: name}, nil + case IngressGateway: + return &IngressGatewayConfigEntry{Kind: kind, Name: name}, nil + case TerminatingGateway: + return &TerminatingGatewayConfigEntry{Kind: kind, Name: name}, nil + case ServiceIntentions: + return &ServiceIntentionsConfigEntry{Kind: kind, Name: name}, nil + case MeshConfig: + return &MeshConfigEntry{}, nil + case ExportedServices: + return &ExportedServicesConfigEntry{Name: name}, nil + case SamenessGroup: + return &SamenessGroupConfigEntry{Kind: kind, Name: name}, nil + case APIGateway: + return &APIGatewayConfigEntry{Kind: kind, Name: name}, nil + case TCPRoute: + return &TCPRouteConfigEntry{Kind: kind, Name: name}, nil + case InlineCertificate: + return &InlineCertificateConfigEntry{Kind: kind, Name: name}, nil + case HTTPRoute: + return &HTTPRouteConfigEntry{Kind: kind, Name: name}, nil + case RateLimitIPConfig: + return &RateLimitIPConfigEntry{Kind: kind, Name: name}, nil + case JWTProvider: + return &JWTProviderConfigEntry{Kind: kind, Name: name}, nil + default: + return nil, fmt.Errorf("invalid config entry kind: %s", kind) + } +} + +func MakeConfigEntry(kind, name string) (ConfigEntry, error) { + return makeConfigEntry(kind, name) +} + +// DecodeConfigEntry will decode the result of using json.Unmarshal of a config +// entry into a map[string]interface{}. +// +// Important caveats: +// +// - This will NOT work if the map[string]interface{} was produced using HCL +// decoding as that requires more extensive parsing to work around the issues +// with map[string][]interface{} that arise. +// +// - This will only decode fields using their camel case json field +// representations. 
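+//
+// Minimal sketch of the expected input shape (field values hypothetical):
+//
+//	raw := map[string]interface{}{
+//		"Kind":     ServiceDefaults,
+//		"Name":     "web",
+//		"Protocol": "http",
+//	}
+//	entry, err := DecodeConfigEntry(raw)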
+func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) { + var entry ConfigEntry + + kindVal, ok := raw["Kind"] + if !ok { + kindVal, ok = raw["kind"] + } + if !ok { + return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level") + } + + if kindStr, ok := kindVal.(string); ok { + newEntry, err := makeConfigEntry(kindStr, "") + if err != nil { + return nil, err + } + entry = newEntry + } else { + return nil, fmt.Errorf("Kind value in payload is not a string") + } + + decodeConf := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToTimeHookFunc(time.RFC3339), + ), + Result: &entry, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + return entry, decoder.Decode(raw) +} + +func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) { + var raw map[string]interface{} + if err := json.Unmarshal(data, &raw); err != nil { + return nil, err + } + + return DecodeConfigEntry(raw) +} + +func decodeConfigEntrySlice(raw []map[string]interface{}) ([]ConfigEntry, error) { + var entries []ConfigEntry + for _, rawEntry := range raw { + entry, err := DecodeConfigEntry(rawEntry) + if err != nil { + return nil, err + } + entries = append(entries, entry) + } + return entries, nil +} + +// ConfigEntries can be used to query the Config endpoints +type ConfigEntries struct { + c *Client +} + +// Config returns a handle to the Config endpoints +func (c *Client) ConfigEntries() *ConfigEntries { + return &ConfigEntries{c} +} + +func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (ConfigEntry, *QueryMeta, error) { + if kind == "" || name == "" { + return nil, nil, fmt.Errorf("Both kind and name parameters must not be empty") + } + + entry, err := makeConfigEntry(kind, name) + if err != nil { + return nil, nil, err + } + + r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s/%s", kind, name)) + r.setQueryOptions(q) + rtt, resp, err := conf.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, entry); err != nil { + return nil, nil, err + } + + return entry, qm, nil +} + +func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) { + if kind == "" { + return nil, nil, fmt.Errorf("The kind parameter must not be empty") + } + + r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s", kind)) + r.setQueryOptions(q) + rtt, resp, err := conf.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var raw []map[string]interface{} + if err := decodeBody(resp, &raw); err != nil { + return nil, nil, err + } + + entries, err := decodeConfigEntrySlice(raw) + if err != nil { + return nil, nil, err + } + + return entries, qm, nil +} + +func (conf *ConfigEntries) Set(entry ConfigEntry, w *WriteOptions) (bool, *WriteMeta, error) { + return conf.set(entry, nil, w) +} + +func (conf *ConfigEntries) CAS(entry ConfigEntry, index uint64, w *WriteOptions) (bool, *WriteMeta, error) { + return conf.set(entry, map[string]string{"cas": strconv.FormatUint(index, 10)}, w) +} + 
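+// Illustrative check-and-set flow (the service name is hypothetical): read
+// the current entry, mutate it, then write it back only if it is unchanged:
+//
+//	entry, _, err := client.ConfigEntries().Get(ServiceDefaults, "web", nil)
+//	if err != nil {
+//		return err
+//	}
+//	sd := entry.(*ServiceConfigEntry)
+//	sd.Protocol = "http"
+//	ok, _, err := client.ConfigEntries().CAS(sd, sd.ModifyIndex, nil)
+//	if err == nil && !ok {
+//		// lost the race: re-read and retry
+//	}
+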
+func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) { + r := conf.c.newRequest("PUT", "/v1/config") + r.setWriteOptions(w) + for param, value := range params { + r.params.Set(param, value) + } + r.obj = entry + rtt, resp, err := conf.c.doRequest(r) + if err != nil { + return false, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return false, nil, err + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + + wm := &WriteMeta{RequestTime: rtt} + return res, wm, nil +} + +func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*WriteMeta, error) { + _, wm, err := conf.delete(kind, name, nil, w) + return wm, err +} + +// DeleteCAS performs a Check-And-Set deletion of the given config entry, and +// returns true if it was successful. If the provided index no longer matches +// the entry's ModifyIndex (i.e. it was modified by another process) then the +// operation will fail and return false. +func (conf *ConfigEntries) DeleteCAS(kind, name string, index uint64, w *WriteOptions) (bool, *WriteMeta, error) { + return conf.delete(kind, name, map[string]string{"cas": strconv.FormatUint(index, 10)}, w) +} + +func (conf *ConfigEntries) delete(kind, name string, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) { + if kind == "" || name == "" { + return false, nil, fmt.Errorf("Both kind and name parameters must not be empty") + } + + r := conf.c.newRequest("DELETE", fmt.Sprintf("/v1/config/%s/%s", kind, name)) + r.setWriteOptions(w) + for param, value := range params { + r.params.Set(param, value) + } + + rtt, resp, err := conf.c.doRequest(r) + if err != nil { + return false, nil, err + } + defer closeResponseBody(resp) + + if err := requireOK(resp); err != nil { + return false, nil, err + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + + res := strings.Contains(buf.String(), "true") + wm := &WriteMeta{RequestTime: rtt} + return res, wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go new file mode 100644 index 0000000000..eeb3a1074c --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go @@ -0,0 +1,384 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "encoding/json" + "time" + + "github.com/hashicorp/go-multierror" +) + +type ServiceRouterConfigEntry struct { + Kind string + Name string + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + + Routes []ServiceRoute `json:",omitempty"` + + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 +} + +func (e *ServiceRouterConfigEntry) GetKind() string { return e.Kind } +func (e *ServiceRouterConfigEntry) GetName() string { return e.Name } +func (e *ServiceRouterConfigEntry) GetPartition() string { return e.Partition } +func (e *ServiceRouterConfigEntry) GetNamespace() string { return e.Namespace } +func (e *ServiceRouterConfigEntry) GetMeta() map[string]string { return e.Meta } +func (e *ServiceRouterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *ServiceRouterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } + +type ServiceRoute struct { + Match *ServiceRouteMatch `json:",omitempty"` + Destination *ServiceRouteDestination `json:",omitempty"` +} + +type ServiceRouteMatch struct { + HTTP *ServiceRouteHTTPMatch `json:",omitempty"` +} + +type ServiceRouteHTTPMatch struct { + PathExact string `json:",omitempty" alias:"path_exact"` + PathPrefix string `json:",omitempty" alias:"path_prefix"` + PathRegex string `json:",omitempty" alias:"path_regex"` + + Header []ServiceRouteHTTPMatchHeader `json:",omitempty"` + QueryParam []ServiceRouteHTTPMatchQueryParam `json:",omitempty" alias:"query_param"` + Methods []string `json:",omitempty"` +} + +type ServiceRouteHTTPMatchHeader struct { + Name string + Present bool `json:",omitempty"` + Exact string `json:",omitempty"` + Prefix string `json:",omitempty"` + Suffix string `json:",omitempty"` + Regex string `json:",omitempty"` + Invert bool `json:",omitempty"` +} + +type ServiceRouteHTTPMatchQueryParam struct { + Name string + Present bool `json:",omitempty"` + Exact string `json:",omitempty"` + Regex string `json:",omitempty"` +} + +type ServiceRouteDestination struct { + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` + Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` + PrefixRewrite string `json:",omitempty" alias:"prefix_rewrite"` + RequestTimeout time.Duration `json:",omitempty" alias:"request_timeout"` + IdleTimeout time.Duration `json:",omitempty" alias:"idle_timeout"` + NumRetries uint32 `json:",omitempty" alias:"num_retries"` + RetryOnConnectFailure bool `json:",omitempty" alias:"retry_on_connect_failure"` + RetryOnStatusCodes []uint32 `json:",omitempty" alias:"retry_on_status_codes"` + RetryOn []string `json:",omitempty" alias:"retry_on"` + RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"` + ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"` +} + +func (e *ServiceRouteDestination) MarshalJSON() ([]byte, error) { + type Alias ServiceRouteDestination + exported := &struct { + RequestTimeout string `json:",omitempty"` + IdleTimeout string `json:",omitempty"` + *Alias + }{ + RequestTimeout: e.RequestTimeout.String(), + IdleTimeout: e.IdleTimeout.String(), + Alias: (*Alias)(e), + } + if e.RequestTimeout == 0 { + exported.RequestTimeout = "" + } + if e.IdleTimeout == 0 { + exported.IdleTimeout = "" + } + + return json.Marshal(exported) +} + +func (e *ServiceRouteDestination) UnmarshalJSON(data []byte) error { + type Alias ServiceRouteDestination + aux 
:= &struct { + RequestTimeout string + IdleTimeout string + *Alias + }{ + Alias: (*Alias)(e), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + var err error + if aux.RequestTimeout != "" { + if e.RequestTimeout, err = time.ParseDuration(aux.RequestTimeout); err != nil { + return err + } + } + if aux.IdleTimeout != "" { + if e.IdleTimeout, err = time.ParseDuration(aux.IdleTimeout); err != nil { + return err + } + } + return nil +} + +type ServiceSplitterConfigEntry struct { + Kind string + Name string + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + + Splits []ServiceSplit `json:",omitempty"` + + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 +} + +func (e *ServiceSplitterConfigEntry) GetKind() string { return e.Kind } +func (e *ServiceSplitterConfigEntry) GetName() string { return e.Name } +func (e *ServiceSplitterConfigEntry) GetPartition() string { return e.Partition } +func (e *ServiceSplitterConfigEntry) GetNamespace() string { return e.Namespace } +func (e *ServiceSplitterConfigEntry) GetMeta() map[string]string { return e.Meta } +func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } + +type ServiceSplit struct { + Weight float32 + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` + Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` + RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"` + ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"` +} + +type ServiceResolverConfigEntry struct { + Kind string + Name string + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + + DefaultSubset string `json:",omitempty" alias:"default_subset"` + Subsets map[string]ServiceResolverSubset `json:",omitempty"` + Redirect *ServiceResolverRedirect `json:",omitempty"` + Failover map[string]ServiceResolverFailover `json:",omitempty"` + ConnectTimeout time.Duration `json:",omitempty" alias:"connect_timeout"` + RequestTimeout time.Duration `json:",omitempty" alias:"request_timeout"` + + // PrioritizeByLocality controls whether the locality of services within the + // local partition will be used to prioritize connectivity. + PrioritizeByLocality *ServiceResolverPrioritizeByLocality `json:",omitempty" alias:"prioritize_by_locality"` + + // LoadBalancer determines the load balancing policy and configuration for services + // issuing requests to this upstream service. 
+ LoadBalancer *LoadBalancer `json:",omitempty" alias:"load_balancer"` + + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 +} + +func (e *ServiceResolverConfigEntry) MarshalJSON() ([]byte, error) { + type Alias ServiceResolverConfigEntry + exported := &struct { + ConnectTimeout string `json:",omitempty"` + RequestTimeout string `json:",omitempty"` + *Alias + }{ + ConnectTimeout: e.ConnectTimeout.String(), + RequestTimeout: e.RequestTimeout.String(), + Alias: (*Alias)(e), + } + if e.ConnectTimeout == 0 { + exported.ConnectTimeout = "" + } + if e.RequestTimeout == 0 { + exported.RequestTimeout = "" + } + + return json.Marshal(exported) +} + +func (e *ServiceResolverConfigEntry) UnmarshalJSON(data []byte) error { + type Alias ServiceResolverConfigEntry + aux := &struct { + ConnectTimeout string + RequestTimeout string + *Alias + }{ + Alias: (*Alias)(e), + } + var err error + if err = json.Unmarshal(data, &aux); err != nil { + return err + } + var merr *multierror.Error + if aux.ConnectTimeout != "" { + if e.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil { + merr = multierror.Append(merr, err) + } + } + if aux.RequestTimeout != "" { + if e.RequestTimeout, err = time.ParseDuration(aux.RequestTimeout); err != nil { + merr = multierror.Append(merr, err) + } + } + return merr.ErrorOrNil() +} + +func (e *ServiceResolverConfigEntry) GetKind() string { return e.Kind } +func (e *ServiceResolverConfigEntry) GetName() string { return e.Name } +func (e *ServiceResolverConfigEntry) GetPartition() string { return e.Partition } +func (e *ServiceResolverConfigEntry) GetNamespace() string { return e.Namespace } +func (e *ServiceResolverConfigEntry) GetMeta() map[string]string { return e.Meta } +func (e *ServiceResolverConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *ServiceResolverConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } + +type ServiceResolverSubset struct { + Filter string `json:",omitempty"` + OnlyPassing bool `json:",omitempty" alias:"only_passing"` +} + +type ServiceResolverRedirect struct { + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` + Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` + Datacenter string `json:",omitempty"` + Peer string `json:",omitempty"` + SamenessGroup string `json:",omitempty" alias:"sameness_group"` +} + +type ServiceResolverFailover struct { + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` + // Referencing other partitions is not supported. + Namespace string `json:",omitempty"` + Datacenters []string `json:",omitempty"` + Targets []ServiceResolverFailoverTarget `json:",omitempty"` + Policy *ServiceResolverFailoverPolicy `json:",omitempty"` + SamenessGroup string `json:",omitempty" alias:"sameness_group"` +} + +type ServiceResolverFailoverTarget struct { + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + Datacenter string `json:",omitempty"` + Peer string `json:",omitempty"` +} + +type ServiceResolverFailoverPolicy struct { + // Mode specifies the type of failover that will be performed. Valid values are + // "sequential", "" (equivalent to "sequential") and "order-by-locality". 
+ Mode string `json:",omitempty"`
+ Regions []string `json:",omitempty"`
+}
+
+type ServiceResolverPrioritizeByLocality struct {
+ // Mode specifies the type of prioritization that will be performed
+ // when selecting nodes in the local partition.
+ // Valid values are: "" (default "none"), "none", and "failover".
+ Mode string `json:",omitempty"`
+}
+
+// LoadBalancer determines the load balancing policy and configuration for services
+// issuing requests to this upstream service.
+type LoadBalancer struct {
+ // Policy is the load balancing policy used to select a host.
+ Policy string `json:",omitempty"`
+
+ // RingHashConfig contains configuration for the "ring_hash" policy type.
+ RingHashConfig *RingHashConfig `json:",omitempty" alias:"ring_hash_config"`
+
+ // LeastRequestConfig contains configuration for the "least_request" policy type.
+ LeastRequestConfig *LeastRequestConfig `json:",omitempty" alias:"least_request_config"`
+
+ // HashPolicies is a list of hash policies to use for hashing load balancing algorithms.
+ // Hash policies are evaluated individually and combined such that identical lists
+ // result in the same hash.
+ // If no hash policies are present, or none are successfully evaluated,
+ // then a random backend host will be selected.
+ HashPolicies []HashPolicy `json:",omitempty" alias:"hash_policies"`
+}
+
+// RingHashConfig contains configuration for the "ring_hash" policy type.
+type RingHashConfig struct {
+ // MinimumRingSize determines the minimum number of entries in the hash ring.
+ MinimumRingSize uint64 `json:",omitempty" alias:"minimum_ring_size"`
+
+ // MaximumRingSize determines the maximum number of entries in the hash ring.
+ MaximumRingSize uint64 `json:",omitempty" alias:"maximum_ring_size"`
+}
+
+// LeastRequestConfig contains configuration for the "least_request" policy type.
+type LeastRequestConfig struct {
+ // ChoiceCount determines the number of random healthy hosts from which to select the one with the least requests.
+ ChoiceCount uint32 `json:",omitempty" alias:"choice_count"`
+}
+
+// HashPolicy defines which attributes will be hashed by hash-based LB algorithms.
+type HashPolicy struct {
+ // Field is the attribute type to hash on.
+ // Must be one of "header", "cookie", or "query_parameter".
+ // Cannot be specified along with SourceIP.
+ Field string `json:",omitempty"`
+
+ // FieldValue is the value to hash,
+ // i.e. the header name, cookie name, or URL query parameter name.
+ // Cannot be specified along with SourceIP.
+ FieldValue string `json:",omitempty" alias:"field_value"`
+
+ // CookieConfig contains configuration for the "cookie" hash policy type.
+ CookieConfig *CookieConfig `json:",omitempty" alias:"cookie_config"`
+
+ // SourceIP determines whether the hash should be of the source IP rather than of a field and field value.
+ // Cannot be specified along with Field or FieldValue.
+ SourceIP bool `json:",omitempty" alias:"source_ip"`
+
+ // Terminal will short-circuit the computation of the hash when multiple hash policies are present.
+ // If a hash is computed when a Terminal policy is evaluated,
+ // then that hash will be used and subsequent hash policies will be ignored.
+ Terminal bool `json:",omitempty"`
+}
+
+// CookieConfig contains configuration for the "cookie" hash policy type.
+// This is specified to have Envoy generate a cookie for a client on its first request.
+type CookieConfig struct {
+ // Generates a session cookie with no expiration.
+ Session bool `json:",omitempty"`
+
+ // TTL for generated cookies. 
Cannot be specified for session cookies. + TTL time.Duration `json:",omitempty"` + + // The path to set for the cookie + Path string `json:",omitempty"` +} + +// HTTPHeaderModifiers is a set of rules for HTTP header modification that +// should be performed by proxies as the request passes through them. It can +// operate on either request or response headers depending on the context in +// which it is used. +type HTTPHeaderModifiers struct { + // Add is a set of name -> value pairs that should be appended to the request + // or response (i.e. allowing duplicates if the same header already exists). + Add map[string]string `json:",omitempty"` + + // Set is a set of name -> value pairs that should be added to the request or + // response, overwriting any existing header values of the same name. + Set map[string]string `json:",omitempty"` + + // Remove is the set of header names that should be stripped from the request + // or response. + Remove []string `json:",omitempty"` +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_exports.go b/vendor/github.com/hashicorp/consul/api/config_entry_exports.go new file mode 100644 index 0000000000..97920e40dd --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_exports.go @@ -0,0 +1,82 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import "encoding/json" + +// ExportedServicesConfigEntry manages the exported services for a single admin partition. +// Admin Partitions are a Consul Enterprise feature. +type ExportedServicesConfigEntry struct { + // Name is the name of the partition the ExportedServicesConfigEntry applies to. + // Partitioning is a Consul Enterprise feature. + Name string `json:",omitempty"` + + // Partition is the partition where the ExportedServicesConfigEntry is stored. + // If the partition does not match the name, the name will overwrite the partition. + // Partitioning is a Consul Enterprise feature. + Partition string `json:",omitempty"` + + // Services is a list of services to be exported and the list of partitions + // to expose them to. + Services []ExportedService `json:",omitempty"` + + Meta map[string]string `json:",omitempty"` + + // CreateIndex is the Raft index this entry was created at. This is a + // read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. + ModifyIndex uint64 +} + +// ExportedService manages the exporting of a service in the local partition to +// other partitions. +type ExportedService struct { + // Name is the name of the service to be exported. + Name string + + // Namespace is the namespace to export the service from. + Namespace string `json:",omitempty"` + + // Consumers is a list of downstream consumers of the service to be exported. + Consumers []ServiceConsumer `json:",omitempty"` +} + +// ServiceConsumer represents a downstream consumer of the service to be exported. +// At most one of Partition or Peer must be specified. +type ServiceConsumer struct { + // Partition is the admin partition to export the service to. + Partition string `json:",omitempty"` + + // Peer is the name of the peer to export the service to. + Peer string `json:",omitempty" alias:"peer_name"` + + // SamenessGroup is the name of the sameness group to export the service to. 
+ SamenessGroup string `json:",omitempty" alias:"sameness_group"`
+}
+
+func (e *ExportedServicesConfigEntry) GetKind() string { return ExportedServices }
+func (e *ExportedServicesConfigEntry) GetName() string { return e.Name }
+func (e *ExportedServicesConfigEntry) GetPartition() string { return e.Name }
+func (e *ExportedServicesConfigEntry) GetNamespace() string { return "" }
+func (e *ExportedServicesConfigEntry) GetMeta() map[string]string { return e.Meta }
+func (e *ExportedServicesConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
+func (e *ExportedServicesConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
+
+// MarshalJSON adds the Kind field so that the JSON can be decoded back into the
+// correct type.
+func (e *ExportedServicesConfigEntry) MarshalJSON() ([]byte, error) {
+ type Alias ExportedServicesConfigEntry
+ source := &struct {
+ Kind string
+ *Alias
+ }{
+ Kind: ExportedServices,
+ Alias: (*Alias)(e),
+ }
+ return json.Marshal(source)
+}
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go
new file mode 100644
index 0000000000..baf274e2da
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go
@@ -0,0 +1,344 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+// IngressGatewayConfigEntry manages the configuration for an ingress service
+// with the given name.
+type IngressGatewayConfigEntry struct {
+ // Kind of the config entry. This should be set to api.IngressGateway.
+ Kind string
+
+ // Name is used to match the config entry with its associated ingress gateway
+ // service. This should match the name provided in the service definition.
+ Name string
+
+ // Partition is the partition the IngressGateway is associated with.
+ // Partitioning is a Consul Enterprise feature.
+ Partition string `json:",omitempty"`
+
+ // Namespace is the namespace the IngressGateway is associated with.
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+
+ // TLS holds the TLS configuration for this gateway.
+ TLS GatewayTLSConfig
+
+ // Listeners declares what ports the ingress gateway should listen on, and
+ // what services to associate with those ports.
+ Listeners []IngressListener
+
+ Meta map[string]string `json:",omitempty"`
+
+ // Defaults is the default configuration for all upstream services.
+ Defaults *IngressServiceConfig `json:",omitempty"`
+
+ // CreateIndex is the Raft index this entry was created at. This is a
+ // read-only field.
+ CreateIndex uint64
+
+ // ModifyIndex is used for the Check-And-Set operations and can also be fed
+ // back into the WaitIndex of the QueryOptions in order to perform blocking
+ // queries.
+ ModifyIndex uint64
+}
+
+type IngressServiceConfig struct {
+ MaxConnections *uint32
+ MaxPendingRequests *uint32
+ MaxConcurrentRequests *uint32
+
+ // PassiveHealthCheck configuration determines how upstream proxy instances will
+ // be monitored for removal from the load balancing pool.
+ PassiveHealthCheck *PassiveHealthCheck `json:",omitempty" alias:"passive_health_check"`
+}
+
+type GatewayTLSConfig struct {
+ // Indicates that TLS should be enabled for this gateway service.
+ Enabled bool
+
+ // SDS allows configuring TLS certificate from an SDS service. 
+ SDS *GatewayTLSSDSConfig `json:",omitempty"` + + TLSMinVersion string `json:",omitempty" alias:"tls_min_version"` + TLSMaxVersion string `json:",omitempty" alias:"tls_max_version"` + + // Define a subset of cipher suites to restrict + // Only applicable to connections negotiated via TLS 1.2 or earlier + CipherSuites []string `json:",omitempty" alias:"cipher_suites"` +} + +type GatewayServiceTLSConfig struct { + // SDS allows configuring TLS certificate from an SDS service. + SDS *GatewayTLSSDSConfig `json:",omitempty"` +} + +type GatewayTLSSDSConfig struct { + ClusterName string `json:",omitempty" alias:"cluster_name"` + CertResource string `json:",omitempty" alias:"cert_resource"` +} + +// IngressListener manages the configuration for a listener on a specific port. +type IngressListener struct { + // Port declares the port on which the ingress gateway should listen for traffic. + Port int + + // Protocol declares what type of traffic this listener is expected to + // receive. Depending on the protocol, a listener might support multiplexing + // services over a single port, or additional discovery chain features. The + // current supported values are: (tcp | http | http2 | grpc). + Protocol string + + // Services declares the set of services to which the listener forwards + // traffic. + // + // For "tcp" protocol listeners, only a single service is allowed. + // For "http" listeners, multiple services can be declared. + Services []IngressService + + // TLS allows specifying some TLS configuration per listener. + TLS *GatewayTLSConfig `json:",omitempty"` +} + +// IngressService manages configuration for services that are exposed to +// ingress traffic. +type IngressService struct { + // Name declares the service to which traffic should be forwarded. + // + // This can either be a specific service, or the wildcard specifier, + // "*". If the wildcard specifier is provided, the listener must be of "http" + // protocol and means that the listener will forward traffic to all services. + // + // A name can be specified on multiple listeners, and will be exposed on both + // of the listeners. + Name string + + // Hosts is a list of hostnames which should be associated to this service on + // the defined listener. Only allowed on layer 7 protocols, this will be used + // to route traffic to the service by matching the Host header of the HTTP + // request. + // + // If a host is provided for a service that also has a wildcard specifier + // defined, the host will override the wildcard-specifier-provided + // ".*" domain for that listener. + // + // This cannot be specified when using the wildcard specifier, "*", or when + // using a "tcp" listener. + Hosts []string + + // Namespace is the namespace where the service is located. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // Partition is the partition where the service is located. + // Partitioning is a Consul Enterprise feature. + Partition string `json:",omitempty"` + + // TLS allows specifying some TLS configuration per listener. + TLS *GatewayServiceTLSConfig `json:",omitempty"` + + // Allow HTTP header manipulation to be configured. 
+ RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"` + ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"` + + MaxConnections *uint32 `json:",omitempty" alias:"max_connections"` + MaxPendingRequests *uint32 `json:",omitempty" alias:"max_pending_requests"` + MaxConcurrentRequests *uint32 `json:",omitempty" alias:"max_concurrent_requests"` + + // PassiveHealthCheck configuration determines how upstream proxy instances will + // be monitored for removal from the load balancing pool. + PassiveHealthCheck *PassiveHealthCheck `json:",omitempty" alias:"passive_health_check"` +} + +func (i *IngressGatewayConfigEntry) GetKind() string { return i.Kind } +func (i *IngressGatewayConfigEntry) GetName() string { return i.Name } +func (i *IngressGatewayConfigEntry) GetPartition() string { return i.Partition } +func (i *IngressGatewayConfigEntry) GetNamespace() string { return i.Namespace } +func (i *IngressGatewayConfigEntry) GetMeta() map[string]string { return i.Meta } +func (i *IngressGatewayConfigEntry) GetCreateIndex() uint64 { return i.CreateIndex } +func (i *IngressGatewayConfigEntry) GetModifyIndex() uint64 { return i.ModifyIndex } + +// TerminatingGatewayConfigEntry manages the configuration for a terminating gateway +// with the given name. +type TerminatingGatewayConfigEntry struct { + // Kind of the config entry. This should be set to api.TerminatingGateway. + Kind string + + // Name is used to match the config entry with its associated terminating gateway + // service. This should match the name provided in the service definition. + Name string + + // Services is a list of service names represented by the terminating gateway. + Services []LinkedService `json:",omitempty"` + + Meta map[string]string `json:",omitempty"` + + // CreateIndex is the Raft index this entry was created at. This is a + // read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. + ModifyIndex uint64 + + // Partition is the partition the config entry is associated with. + // Partitioning is a Consul Enterprise feature. + Partition string `json:",omitempty"` + + // Namespace is the namespace the config entry is associated with. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` +} + +// A LinkedService is a service represented by a terminating gateway +type LinkedService struct { + // Referencing other partitions is not supported. + + // Namespace is where the service is registered. + Namespace string `json:",omitempty"` + + // Name is the name of the service, as defined in Consul's catalog. + Name string `json:",omitempty"` + + // CAFile is the optional path to a CA certificate to use for TLS connections + // from the gateway to the linked service. + CAFile string `json:",omitempty" alias:"ca_file"` + + // CertFile is the optional path to a client certificate to use for TLS connections + // from the gateway to the linked service. + CertFile string `json:",omitempty" alias:"cert_file"` + + // KeyFile is the optional path to a private key to use for TLS connections + // from the gateway to the linked service. + KeyFile string `json:",omitempty" alias:"key_file"` + + // SNI is the optional name to specify during the TLS handshake with a linked service. 
+ SNI string `json:",omitempty"` +} + +func (g *TerminatingGatewayConfigEntry) GetKind() string { return g.Kind } +func (g *TerminatingGatewayConfigEntry) GetName() string { return g.Name } +func (g *TerminatingGatewayConfigEntry) GetPartition() string { return g.Partition } +func (g *TerminatingGatewayConfigEntry) GetNamespace() string { return g.Namespace } +func (g *TerminatingGatewayConfigEntry) GetMeta() map[string]string { return g.Meta } +func (g *TerminatingGatewayConfigEntry) GetCreateIndex() uint64 { return g.CreateIndex } +func (g *TerminatingGatewayConfigEntry) GetModifyIndex() uint64 { return g.ModifyIndex } + +// APIGatewayConfigEntry manages the configuration for an API gateway +// with the given name. +type APIGatewayConfigEntry struct { + // Kind of the config entry. This should be set to api.APIGateway. + Kind string + + // Name is used to match the config entry with its associated api gateway + // service. This should match the name provided in the service definition. + Name string + + Meta map[string]string `json:",omitempty"` + + // Listeners is the set of listener configuration to which an API Gateway + // might bind. + Listeners []APIGatewayListener + // Status is the asynchronous status which an APIGateway propagates to the user. + Status ConfigEntryStatus + + // CreateIndex is the Raft index this entry was created at. This is a + // read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. + ModifyIndex uint64 + + // Partition is the partition the config entry is associated with. + // Partitioning is a Consul Enterprise feature. + Partition string `json:",omitempty"` + + // Namespace is the namespace the config entry is associated with. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` +} + +func (g *APIGatewayConfigEntry) GetKind() string { return g.Kind } +func (g *APIGatewayConfigEntry) GetName() string { return g.Name } +func (g *APIGatewayConfigEntry) GetPartition() string { return g.Partition } +func (g *APIGatewayConfigEntry) GetNamespace() string { return g.Namespace } +func (g *APIGatewayConfigEntry) GetMeta() map[string]string { return g.Meta } +func (g *APIGatewayConfigEntry) GetCreateIndex() uint64 { return g.CreateIndex } +func (g *APIGatewayConfigEntry) GetModifyIndex() uint64 { return g.ModifyIndex } + +// APIGatewayListener represents an individual listener for an APIGateway +type APIGatewayListener struct { + // Name is the name of the listener in a given gateway. This must be + // unique within a gateway. + Name string + // Hostname is the host name that a listener should be bound to, if + // unspecified, the listener accepts requests for all hostnames. + Hostname string + // Port is the port at which this listener should bind. + Port int + // Protocol is the protocol that a listener should use, it must + // either be "http" or "tcp" + Protocol string + // TLS is the TLS settings for the listener. + TLS APIGatewayTLSConfiguration + // Override is the policy that overrides all other policy and route specific configuration + Override *APIGatewayPolicy `json:",omitempty"` + // Default is the policy that is the default for the listener and route, routes can override this behavior + Default *APIGatewayPolicy `json:",omitempty"` +} + +// APIGatewayTLSConfiguration specifies the configuration of a listener’s +// TLS settings. 
+type APIGatewayTLSConfiguration struct { + // Certificates is a set of references to certificates + // that a gateway listener uses for TLS termination. + Certificates []ResourceReference + // MaxVersion is the maximum TLS version that the listener + // should support. + MaxVersion string `json:",omitempty" alias:"tls_max_version"` + // MinVersion is the minimum TLS version that the listener + // should support. + MinVersion string `json:",omitempty" alias:"tls_min_version"` + // Define a subset of cipher suites to restrict + // Only applicable to connections negotiated via TLS 1.2 or earlier + CipherSuites []string `json:",omitempty" alias:"cipher_suites"` +} + +// APIGatewayPolicy holds the policy that configures the gateway listener, this is used in the `Override` and `Default` fields of a listener +type APIGatewayPolicy struct { + // JWT holds the JWT configuration for the Listener + JWT *APIGatewayJWTRequirement `json:",omitempty"` +} + +// APIGatewayJWTRequirement holds the list of JWT providers to be verified against +type APIGatewayJWTRequirement struct { + // Providers is a list of providers to consider when verifying a JWT. + Providers []*APIGatewayJWTProvider `json:",omitempty"` +} + +// APIGatewayJWTProvider holds the provider and claim verification information +type APIGatewayJWTProvider struct { + // Name is the name of the JWT provider. There MUST be a corresponding + // "jwt-provider" config entry with this name. + Name string `json:",omitempty"` + + // VerifyClaims is a list of additional claims to verify in a JWT's payload. + VerifyClaims []*APIGatewayJWTClaimVerification `json:",omitempty" alias:"verify_claims"` +} + +// APIGatewayJWTClaimVerification holds the actual claim information to be verified +type APIGatewayJWTClaimVerification struct { + // Path is the path to the claim in the token JSON. + Path []string `json:",omitempty"` + + // Value is the expected value at the given path: + // - If the type at the path is a list then we verify + // that this value is contained in the list. + // + // - If the type at the path is a string then we verify + // that this value matches. + Value string `json:",omitempty"` +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_inline_certificate.go b/vendor/github.com/hashicorp/consul/api/config_entry_inline_certificate.go new file mode 100644 index 0000000000..47a1ead056 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_inline_certificate.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +// InlineCertificateConfigEntry -- TODO stub +type InlineCertificateConfigEntry struct { + // Kind of the config entry. This should be set to api.InlineCertificate. + Kind string + + // Name is used to match the config entry with its associated tcp-route + // service. This should match the name provided in the service definition. + Name string + + // Certificate is the public certificate component of an x509 key pair encoded in raw PEM format. + Certificate string + // PrivateKey is the private key component of an x509 key pair encoded in raw PEM format. + PrivateKey string `alias:"private_key"` + + Meta map[string]string `json:",omitempty"` + + // CreateIndex is the Raft index this entry was created at. This is a + // read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. 
+ ModifyIndex uint64 + + // Partition is the partition the config entry is associated with. + // Partitioning is a Consul Enterprise feature. + Partition string `json:",omitempty"` + + // Namespace is the namespace the config entry is associated with. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` +} + +func (a *InlineCertificateConfigEntry) GetKind() string { return InlineCertificate } +func (a *InlineCertificateConfigEntry) GetName() string { return a.Name } +func (a *InlineCertificateConfigEntry) GetPartition() string { return a.Partition } +func (a *InlineCertificateConfigEntry) GetNamespace() string { return a.Namespace } +func (a *InlineCertificateConfigEntry) GetMeta() map[string]string { return a.Meta } +func (a *InlineCertificateConfigEntry) GetCreateIndex() uint64 { return a.CreateIndex } +func (a *InlineCertificateConfigEntry) GetModifyIndex() uint64 { return a.ModifyIndex } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go b/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go new file mode 100644 index 0000000000..3f03b0875b --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import "time" + +type ServiceIntentionsConfigEntry struct { + Kind string + Name string + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + + Sources []*SourceIntention + JWT *IntentionJWTRequirement `json:",omitempty"` + + Meta map[string]string `json:",omitempty"` + + CreateIndex uint64 + ModifyIndex uint64 +} + +type SourceIntention struct { + Name string + Peer string `json:",omitempty"` + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + SamenessGroup string `json:",omitempty" alias:"sameness_group"` + Action IntentionAction `json:",omitempty"` + Permissions []*IntentionPermission `json:",omitempty"` + Precedence int + Type IntentionSourceType + Description string `json:",omitempty"` + + LegacyID string `json:",omitempty" alias:"legacy_id"` + LegacyMeta map[string]string `json:",omitempty" alias:"legacy_meta"` + LegacyCreateTime *time.Time `json:",omitempty" alias:"legacy_create_time"` + LegacyUpdateTime *time.Time `json:",omitempty" alias:"legacy_update_time"` +} + +func (e *ServiceIntentionsConfigEntry) GetKind() string { return e.Kind } +func (e *ServiceIntentionsConfigEntry) GetName() string { return e.Name } +func (e *ServiceIntentionsConfigEntry) GetPartition() string { return e.Partition } +func (e *ServiceIntentionsConfigEntry) GetNamespace() string { return e.Namespace } +func (e *ServiceIntentionsConfigEntry) GetMeta() map[string]string { return e.Meta } +func (e *ServiceIntentionsConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *ServiceIntentionsConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } + +type IntentionPermission struct { + Action IntentionAction + HTTP *IntentionHTTPPermission `json:",omitempty"` + JWT *IntentionJWTRequirement `json:",omitempty"` +} + +type IntentionHTTPPermission struct { + PathExact string `json:",omitempty" alias:"path_exact"` + PathPrefix string `json:",omitempty" alias:"path_prefix"` + PathRegex string `json:",omitempty" alias:"path_regex"` + + Header []IntentionHTTPHeaderPermission `json:",omitempty"` + + Methods []string `json:",omitempty"` +} + +type IntentionHTTPHeaderPermission struct { + Name string + Present bool 
`json:",omitempty"` + Exact string `json:",omitempty"` + Prefix string `json:",omitempty"` + Suffix string `json:",omitempty"` + Regex string `json:",omitempty"` + Invert bool `json:",omitempty"` +} + +type IntentionJWTRequirement struct { + // Providers is a list of providers to consider when verifying a JWT. + Providers []*IntentionJWTProvider `json:",omitempty"` +} + +type IntentionJWTProvider struct { + // Name is the name of the JWT provider. There MUST be a corresponding + // "jwt-provider" config entry with this name. + Name string `json:",omitempty"` + + // VerifyClaims is a list of additional claims to verify in a JWT's payload. + VerifyClaims []*IntentionJWTClaimVerification `json:",omitempty" alias:"verify_claims"` +} + +type IntentionJWTClaimVerification struct { + // Path is the path to the claim in the token JSON. + Path []string `json:",omitempty"` + + // Value is the expected value at the given path: + // - If the type at the path is a list then we verify + // that this value is contained in the list. + // + // - If the type at the path is a string then we verify + // that this value matches. + Value string `json:",omitempty"` +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go b/vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go new file mode 100644 index 0000000000..270f0d5641 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go @@ -0,0 +1,310 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "time" +) + +const ( + DiscoveryTypeStrictDNS ClusterDiscoveryType = "STRICT_DNS" + DiscoveryTypeStatic ClusterDiscoveryType = "STATIC" + DiscoveryTypeLogicalDNS ClusterDiscoveryType = "LOGICAL_DNS" + DiscoveryTypeEDS ClusterDiscoveryType = "EDS" + DiscoveryTypeOriginalDST ClusterDiscoveryType = "ORIGINAL_DST" +) + +type JWTProviderConfigEntry struct { + // Kind is the kind of configuration entry and must be "jwt-provider". + Kind string `json:",omitempty"` + + // Name is the name of the provider being configured. + Name string `json:",omitempty"` + + // JSONWebKeySet defines a JSON Web Key Set, its location on disk, or the + // means with which to fetch a key set from a remote server. + JSONWebKeySet *JSONWebKeySet `json:",omitempty" alias:"json_web_key_set"` + + // Issuer is the entity that must have issued the JWT. + // This value must match the "iss" claim of the token. + Issuer string `json:",omitempty"` + + // Audiences is the set of audiences the JWT is allowed to access. + // If specified, all JWTs verified with this provider must address + // at least one of these to be considered valid. + Audiences []string `json:",omitempty"` + + // Locations where the JWT will be present in requests. + // Envoy will check all of these locations to extract a JWT. + // If no locations are specified Envoy will default to: + // 1. Authorization header with Bearer schema: + // "Authorization: Bearer " + // 2. access_token query parameter. + Locations []*JWTLocation `json:",omitempty"` + + // Forwarding defines rules for forwarding verified JWTs to the backend. + Forwarding *JWTForwardingConfig `json:",omitempty"` + + // ClockSkewSeconds specifies the maximum allowable time difference + // from clock skew when validating the "exp" (Expiration) and "nbf" + // (Not Before) claims. + // + // Default value is 30 seconds. 
+ ClockSkewSeconds int `json:",omitempty" alias:"clock_skew_seconds"`
+
+ // CacheConfig defines configuration for caching the validation
+ // result for previously seen JWTs. Caching results can speed up
+ // verification when individual tokens are expected to be handled
+ // multiple times.
+ CacheConfig *JWTCacheConfig `json:",omitempty" alias:"cache_config"`
+
+ Meta map[string]string `json:",omitempty"`
+
+ // CreateIndex is the Raft index this entry was created at. This is a
+ // read-only field.
+ CreateIndex uint64 `json:",omitempty"`
+
+ // ModifyIndex is used for the Check-And-Set operations and can also be fed
+ // back into the WaitIndex of the QueryOptions in order to perform blocking
+ // queries.
+ ModifyIndex uint64 `json:",omitempty"`
+
+ // Partition is the partition the JWTProviderConfigEntry applies to.
+ // Partitioning is a Consul Enterprise feature.
+ Partition string `json:",omitempty"`
+
+ // Namespace is the namespace the JWTProviderConfigEntry applies to.
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+}
+
+// JWTLocation is a location where the JWT could be present in requests.
+//
+// Only one of Header, QueryParam, or Cookie can be specified.
+type JWTLocation struct {
+ // Header defines how to extract a JWT from an HTTP request header.
+ Header *JWTLocationHeader `json:",omitempty"`
+
+ // QueryParam defines how to extract a JWT from an HTTP request
+ // query parameter.
+ QueryParam *JWTLocationQueryParam `json:",omitempty" alias:"query_param"`
+
+ // Cookie defines how to extract a JWT from an HTTP request cookie.
+ Cookie *JWTLocationCookie `json:",omitempty"`
+}
+
+// JWTLocationHeader defines how to extract a JWT from an HTTP
+// request header.
+type JWTLocationHeader struct {
+ // Name is the name of the header containing the token.
+ Name string `json:",omitempty"`
+
+ // ValuePrefix is an optional prefix that precedes the token in the
+ // header value.
+ // For example, "Bearer " is a standard value prefix for a header named
+ // "Authorization", but the prefix is not part of the token itself:
+ // "Authorization: Bearer <token>"
+ ValuePrefix string `json:",omitempty" alias:"value_prefix"`
+
+ // Forward defines whether the header with the JWT should be
+ // forwarded after the token has been verified. If false, the
+ // header will not be forwarded to the backend.
+ //
+ // Default value is false.
+ Forward bool `json:",omitempty"`
+}
+
+// JWTLocationQueryParam defines how to extract a JWT from an HTTP request query parameter.
+type JWTLocationQueryParam struct {
+ // Name is the name of the query param containing the token.
+ Name string `json:",omitempty"`
+}
+
+// JWTLocationCookie defines how to extract a JWT from an HTTP request cookie.
+type JWTLocationCookie struct {
+ // Name is the name of the cookie containing the token.
+ Name string `json:",omitempty"`
+}
+
+type JWTForwardingConfig struct {
+ // HeaderName is a header name to use when forwarding a verified
+ // JWT to the backend. The verified JWT could have been extracted
+ // from any location (query param, header, or cookie).
+ //
+ // The header value will be base64-URL-encoded, and will not be
+ // padded unless PadForwardPayloadHeader is true.
+ HeaderName string `json:",omitempty" alias:"header_name"`
+
+ // PadForwardPayloadHeader determines whether padding should be added
+ // to the base64 encoded token forwarded with ForwardPayloadHeader.
+ //
+ // Default value is false. 
+ PadForwardPayloadHeader bool `json:",omitempty" alias:"pad_forward_payload_header"` +} + +// JSONWebKeySet defines a key set, its location on disk, or the +// means with which to fetch a key set from a remote server. +// +// Exactly one of Local or Remote must be specified. +type JSONWebKeySet struct { + // Local specifies a local source for the key set. + Local *LocalJWKS `json:",omitempty"` + + // Remote specifies how to fetch a key set from a remote server. + Remote *RemoteJWKS `json:",omitempty"` +} + +// LocalJWKS specifies a location for a local JWKS. +// +// Only one of String and Filename can be specified. +type LocalJWKS struct { + // JWKS contains a base64 encoded JWKS. + JWKS string `json:",omitempty"` + + // Filename configures a location on disk where the JWKS can be + // found. If specified, the file must be present on the disk of ALL + // proxies with intentions referencing this provider. + Filename string `json:",omitempty"` +} + +// RemoteJWKS specifies how to fetch a JWKS from a remote server. +type RemoteJWKS struct { + // URI is the URI of the server to query for the JWKS. + URI string `json:",omitempty"` + + // RequestTimeoutMs is the number of milliseconds to + // time out when making a request for the JWKS. + RequestTimeoutMs int `json:",omitempty" alias:"request_timeout_ms"` + + // CacheDuration is the duration after which cached keys + // should be expired. + // + // Default value is 5 minutes. + CacheDuration time.Duration `json:",omitempty" alias:"cache_duration"` + + // FetchAsynchronously indicates that the JWKS should be fetched + // when a client request arrives. Client requests will be paused + // until the JWKS is fetched. + // If false, the proxy listener will wait for the JWKS to be + // fetched before being activated. + // + // Default value is false. + FetchAsynchronously bool `json:",omitempty" alias:"fetch_asynchronously"` + + // RetryPolicy defines a retry policy for fetching JWKS. + // + // There is no retry by default. + RetryPolicy *JWKSRetryPolicy `json:",omitempty" alias:"retry_policy"` + + // JWKSCluster defines how the specified Remote JWKS URI is to be fetched. + JWKSCluster *JWKSCluster `json:",omitempty" alias:"jwks_cluster"` +} + +type JWKSCluster struct { + // DiscoveryType refers to the service discovery type to use for resolving the cluster. + // + // This defaults to STRICT_DNS. + // Other options include STATIC, LOGICAL_DNS, EDS or ORIGINAL_DST. + DiscoveryType ClusterDiscoveryType `json:",omitempty" alias:"discovery_type"` + + // TLSCertificates refers to the data containing certificate authority certificates to use + // in verifying a presented peer certificate. + // If not specified and a peer certificate is presented it will not be verified. + // + // Must be either CaCertificateProviderInstance or TrustedCA. + TLSCertificates *JWKSTLSCertificate `json:",omitempty" alias:"tls_certificates"` + + // The timeout for new network connections to hosts in the cluster. + // If not set, a default value of 5s will be used. + ConnectTimeout time.Duration `json:",omitempty" alias:"connect_timeout"` +} + +type ClusterDiscoveryType string + +// JWKSTLSCertificate refers to the data containing certificate authority certificates to use +// in verifying a presented peer certificate. +// If not specified and a peer certificate is presented it will not be verified. +// +// Must be either CaCertificateProviderInstance or TrustedCA. 
+type JWKSTLSCertificate struct {
+ // CaCertificateProviderInstance specifies the certificate provider instance
+ // for fetching TLS certificates.
+ CaCertificateProviderInstance *JWKSTLSCertProviderInstance `json:",omitempty" alias:"ca_certificate_provider_instance"`
+
+ // TrustedCA defines TLS certificate data containing certificate authority certificates
+ // to use in verifying a presented peer certificate.
+ //
+ // Exactly one of Filename, EnvironmentVariable, InlineString or InlineBytes must be specified.
+ TrustedCA *JWKSTLSCertTrustedCA `json:",omitempty" alias:"trusted_ca"`
+}
+
+// JWKSTLSCertTrustedCA defines TLS certificate data containing certificate authority certificates
+// to use in verifying a presented peer certificate.
+//
+// Exactly one of Filename, EnvironmentVariable, InlineString or InlineBytes must be specified.
+type JWKSTLSCertTrustedCA struct {
+ Filename string `json:",omitempty" alias:"filename"`
+ EnvironmentVariable string `json:",omitempty" alias:"environment_variable"`
+ InlineString string `json:",omitempty" alias:"inline_string"`
+ InlineBytes []byte `json:",omitempty" alias:"inline_bytes"`
+}
+
+type JWKSTLSCertProviderInstance struct {
+ // InstanceName refers to the certificate provider instance name.
+ //
+ // The default value is "default".
+ InstanceName string `json:",omitempty" alias:"instance_name"`
+
+ // CertificateName is used to specify certificate instances or types. For example, "ROOTCA" to specify
+ // a root-certificate (validation context) or "example.com" to specify a certificate for a
+ // particular domain.
+ //
+ // The default value is the empty string.
+ CertificateName string `json:",omitempty" alias:"certificate_name"`
+}
+
+type JWKSRetryPolicy struct {
+ // NumRetries is the number of times to retry fetching the JWKS.
+ // The retry strategy uses jittered exponential backoff with
+ // a base interval of 1s and max of 10s.
+ //
+ // Default value is 0.
+ NumRetries int `json:",omitempty" alias:"num_retries"`
+
+ // RetryPolicyBackOff is the backoff policy.
+ //
+ // Defaults to Envoy's backoff policy.
+ RetryPolicyBackOff *RetryPolicyBackOff `json:",omitempty" alias:"retry_policy_back_off"`
+}
+
+type RetryPolicyBackOff struct {
+ // BaseInterval is used for the next backoff computation.
+ //
+ // The default value from Envoy is 1s.
+ BaseInterval time.Duration `json:",omitempty" alias:"base_interval"`
+
+ // MaxInterval is used to specify the maximum interval between retries.
+ // Optional, but should be greater than or equal to BaseInterval.
+ //
+ // Defaults to 10 times BaseInterval.
+ MaxInterval time.Duration `json:",omitempty" alias:"max_interval"`
+}
+
+type JWTCacheConfig struct {
+ // Size specifies the maximum number of JWT verification
+ // results to cache.
+ //
+ // Defaults to 0, meaning that JWT caching is disabled. 
+ Size int `json:",omitempty"` +} + +func (e *JWTProviderConfigEntry) GetKind() string { + return JWTProvider +} + +func (e *JWTProviderConfigEntry) GetName() string { return e.Name } +func (e *JWTProviderConfigEntry) GetMeta() map[string]string { return e.Meta } +func (e *JWTProviderConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *JWTProviderConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } +func (e *JWTProviderConfigEntry) GetPartition() string { return e.Partition } +func (e *JWTProviderConfigEntry) GetNamespace() string { return e.Namespace } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go b/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go new file mode 100644 index 0000000000..1a1ebb8b53 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go @@ -0,0 +1,90 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "encoding/json" +) + +// MeshConfigEntry manages the global configuration for all service mesh +// proxies. +type MeshConfigEntry struct { + // Partition is the partition the MeshConfigEntry applies to. + // Partitioning is a Consul Enterprise feature. + Partition string `json:",omitempty"` + + // Namespace is the namespace the MeshConfigEntry applies to. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // TransparentProxy applies configuration specific to proxies + // in transparent mode. + TransparentProxy TransparentProxyMeshConfig `alias:"transparent_proxy"` + + // AllowEnablingPermissiveMutualTLS must be true in order to allow setting + // MutualTLSMode=permissive in either service-defaults or proxy-defaults. + AllowEnablingPermissiveMutualTLS bool `json:",omitempty" alias:"allow_enabling_permissive_mutual_tls"` + + TLS *MeshTLSConfig `json:",omitempty"` + + HTTP *MeshHTTPConfig `json:",omitempty"` + + Peering *PeeringMeshConfig `json:",omitempty"` + + Meta map[string]string `json:",omitempty"` + + // CreateIndex is the Raft index this entry was created at. This is a + // read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. 
+ ModifyIndex uint64 +} + +type TransparentProxyMeshConfig struct { + MeshDestinationsOnly bool `alias:"mesh_destinations_only"` +} + +type MeshTLSConfig struct { + Incoming *MeshDirectionalTLSConfig `json:",omitempty"` + Outgoing *MeshDirectionalTLSConfig `json:",omitempty"` +} + +type MeshDirectionalTLSConfig struct { + TLSMinVersion string `json:",omitempty" alias:"tls_min_version"` + TLSMaxVersion string `json:",omitempty" alias:"tls_max_version"` + CipherSuites []string `json:",omitempty" alias:"cipher_suites"` +} + +type MeshHTTPConfig struct { + SanitizeXForwardedClientCert bool `alias:"sanitize_x_forwarded_client_cert"` +} + +type PeeringMeshConfig struct { + PeerThroughMeshGateways bool `json:",omitempty" alias:"peer_through_mesh_gateways"` +} + +func (e *MeshConfigEntry) GetKind() string { return MeshConfig } +func (e *MeshConfigEntry) GetName() string { return MeshConfigMesh } +func (e *MeshConfigEntry) GetPartition() string { return e.Partition } +func (e *MeshConfigEntry) GetNamespace() string { return e.Namespace } +func (e *MeshConfigEntry) GetMeta() map[string]string { return e.Meta } +func (e *MeshConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *MeshConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } + +// MarshalJSON adds the Kind field so that the JSON can be decoded back into the +// correct type. +func (e *MeshConfigEntry) MarshalJSON() ([]byte, error) { + type Alias MeshConfigEntry + source := &struct { + Kind string + *Alias + }{ + Kind: MeshConfig, + Alias: (*Alias)(e), + } + return json.Marshal(source) +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go b/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go new file mode 100644 index 0000000000..7af2a2658f --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +type ReadWriteRatesConfig struct { + ReadRate float64 `alias:"read_rate"` + WriteRate float64 `alias:"write_rate"` +} + +type RateLimitIPConfigEntry struct { + // Kind of the config entry. 
This will be set to structs.RateLimitIPConfig + Kind string + Name string + Mode string // {permissive, enforcing, disabled} + + Meta map[string]string `json:",omitempty"` + // overall limits + ReadRate float64 `alias:"read_rate"` + WriteRate float64 `alias:"write_rate"` + + //limits specific to a type of call + ACL *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryACL OperationCategory = "ACL" + Catalog *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryCatalog OperationCategory = "Catalog" + ConfigEntry *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryConfigEntry OperationCategory = "ConfigEntry" + ConnectCA *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryConnectCA OperationCategory = "ConnectCA" + Coordinate *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryCoordinate OperationCategory = "Coordinate" + DiscoveryChain *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryDiscoveryChain OperationCategory = "DiscoveryChain" + ServerDiscovery *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryServerDiscovery OperationCategory = "ServerDiscovery" + Health *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryHealth OperationCategory = "Health" + Intention *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryIntention OperationCategory = "Intention" + KV *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryKV OperationCategory = "KV" + Tenancy *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryPartition OperationCategory = "Tenancy" + PreparedQuery *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryPreparedQuery OperationCategory = "PreparedQuery" + Session *ReadWriteRatesConfig `json:",omitempty"` // OperationCategorySession OperationCategory = "Session" + Txn *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryTxn OperationCategory = "Txn" + AutoConfig *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryAutoConfig OperationCategory = "AutoConfig" + FederationState *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryFederationState OperationCategory = "FederationState" + Internal *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryInternal OperationCategory = "Internal" + PeerStream *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryPeerStream OperationCategory = "PeerStream" + Peering *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryPeering OperationCategory = "Peering" + DataPlane *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryDataPlane OperationCategory = "DataPlane" + DNS *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryDNS OperationCategory = "DNS" + Subscribe *ReadWriteRatesConfig `json:",omitempty"` // OperationCategorySubscribe OperationCategory = "Subscribe" + Resource *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryResource OperationCategory = "Resource" + + // Partition is the partition the config entry is associated with. + // Partitioning is a Consul Enterprise feature. + Partition string `json:",omitempty"` + + // Namespace is the namespace the config entry is associated with. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // CreateIndex is the Raft index this entry was created at. This is a + // read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. 
+	ModifyIndex uint64
+}
+
+func (r *RateLimitIPConfigEntry) GetKind() string {
+	return RateLimitIPConfig
+}
+func (r *RateLimitIPConfigEntry) GetName() string {
+	if r == nil {
+		return ""
+	}
+	return r.Name
+}
+func (r *RateLimitIPConfigEntry) GetPartition() string {
+	return r.Partition
+}
+func (r *RateLimitIPConfigEntry) GetNamespace() string {
+	return r.Namespace
+}
+func (r *RateLimitIPConfigEntry) GetMeta() map[string]string {
+	if r == nil {
+		return nil
+	}
+	return r.Meta
+}
+func (r *RateLimitIPConfigEntry) GetCreateIndex() uint64 {
+	return r.CreateIndex
+}
+func (r *RateLimitIPConfigEntry) GetModifyIndex() uint64 {
+	return r.ModifyIndex
+}
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_routes.go b/vendor/github.com/hashicorp/consul/api/config_entry_routes.go
new file mode 100644
index 0000000000..bbaa032d50
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/config_entry_routes.go
@@ -0,0 +1,281 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import "time"
+
+// TCPRouteConfigEntry -- TODO stub
+type TCPRouteConfigEntry struct {
+	// Kind of the config entry. This should be set to api.TCPRoute.
+	Kind string
+
+	// Name is used to match the config entry with its associated tcp-route
+	// service. This should match the name provided in the service definition.
+	Name string
+
+	// Parents is a list of gateways that this route should be bound to.
+	Parents []ResourceReference
+	// Services is a list of TCP-based services that this should route to.
+	// Currently, this must specify at most one service.
+	Services []TCPService
+
+	Meta map[string]string `json:",omitempty"`
+
+	// Status is the asynchronous status which a TCPRoute propagates to the user.
+	Status ConfigEntryStatus
+
+	// CreateIndex is the Raft index this entry was created at. This is a
+	// read-only field.
+	CreateIndex uint64
+
+	// ModifyIndex is used for the Check-And-Set operations and can also be fed
+	// back into the WaitIndex of the QueryOptions in order to perform blocking
+	// queries.
+	ModifyIndex uint64
+
+	// Partition is the partition the config entry is associated with.
+	// Partitioning is a Consul Enterprise feature.
+	Partition string `json:",omitempty"`
+
+	// Namespace is the namespace the config entry is associated with.
+	// Namespacing is a Consul Enterprise feature.
+	Namespace string `json:",omitempty"`
+}
+
+func (a *TCPRouteConfigEntry) GetKind() string            { return TCPRoute }
+func (a *TCPRouteConfigEntry) GetName() string            { return a.Name }
+func (a *TCPRouteConfigEntry) GetPartition() string       { return a.Partition }
+func (a *TCPRouteConfigEntry) GetNamespace() string       { return a.Namespace }
+func (a *TCPRouteConfigEntry) GetMeta() map[string]string { return a.Meta }
+func (a *TCPRouteConfigEntry) GetCreateIndex() uint64     { return a.CreateIndex }
+func (a *TCPRouteConfigEntry) GetModifyIndex() uint64     { return a.ModifyIndex }
+
+// TCPService is a service reference for a TCPRoute
+type TCPService struct {
+	Name string
+
+	// Partition is the partition the config entry is associated with.
+	// Partitioning is a Consul Enterprise feature.
+	Partition string `json:",omitempty"`
+
+	// Namespace is the namespace the config entry is associated with.
+	// Namespacing is a Consul Enterprise feature.
+	Namespace string `json:",omitempty"`
+}
+
+// HTTPRouteConfigEntry manages the configuration for an HTTP route
+// with the given name.
+type HTTPRouteConfigEntry struct {
+	// Kind of the config entry.
This should be set to api.HTTPRoute. + Kind string + + // Name is used to match the config entry with its associated http-route. + Name string + + // Parents is a list of gateways that this route should be bound to + Parents []ResourceReference + // Rules are a list of HTTP-based routing rules that this route should + // use for constructing a routing table. + Rules []HTTPRouteRule + // Hostnames are the hostnames for which this HTTPRoute should respond to requests. + Hostnames []string + + Meta map[string]string `json:",omitempty"` + + // CreateIndex is the Raft index this entry was created at. This is a + // read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. + ModifyIndex uint64 + + // Partition is the partition the config entry is associated with. + // Partitioning is a Consul Enterprise feature. + Partition string `json:",omitempty"` + + // Namespace is the namespace the config entry is associated with. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // Status is the asynchronous status which an HTTPRoute propagates to the user. + Status ConfigEntryStatus +} + +func (r *HTTPRouteConfigEntry) GetKind() string { return HTTPRoute } +func (r *HTTPRouteConfigEntry) GetName() string { return r.Name } +func (r *HTTPRouteConfigEntry) GetPartition() string { return r.Partition } +func (r *HTTPRouteConfigEntry) GetNamespace() string { return r.Namespace } +func (r *HTTPRouteConfigEntry) GetMeta() map[string]string { return r.Meta } +func (r *HTTPRouteConfigEntry) GetCreateIndex() uint64 { return r.CreateIndex } +func (r *HTTPRouteConfigEntry) GetModifyIndex() uint64 { return r.ModifyIndex } + +// HTTPMatch specifies the criteria that should be +// used in determining whether or not a request should +// be routed to a given set of services. +type HTTPMatch struct { + Headers []HTTPHeaderMatch + Method HTTPMatchMethod + Path HTTPPathMatch + Query []HTTPQueryMatch +} + +// HTTPMatchMethod specifies which type of HTTP verb should +// be used for matching a given request. +type HTTPMatchMethod string + +const ( + HTTPMatchMethodAll HTTPMatchMethod = "" + HTTPMatchMethodConnect HTTPMatchMethod = "CONNECT" + HTTPMatchMethodDelete HTTPMatchMethod = "DELETE" + HTTPMatchMethodGet HTTPMatchMethod = "GET" + HTTPMatchMethodHead HTTPMatchMethod = "HEAD" + HTTPMatchMethodOptions HTTPMatchMethod = "OPTIONS" + HTTPMatchMethodPatch HTTPMatchMethod = "PATCH" + HTTPMatchMethodPost HTTPMatchMethod = "POST" + HTTPMatchMethodPut HTTPMatchMethod = "PUT" + HTTPMatchMethodTrace HTTPMatchMethod = "TRACE" +) + +// HTTPHeaderMatchType specifies how header matching criteria +// should be applied to a request. +type HTTPHeaderMatchType string + +const ( + HTTPHeaderMatchExact HTTPHeaderMatchType = "exact" + HTTPHeaderMatchPrefix HTTPHeaderMatchType = "prefix" + HTTPHeaderMatchPresent HTTPHeaderMatchType = "present" + HTTPHeaderMatchRegularExpression HTTPHeaderMatchType = "regex" + HTTPHeaderMatchSuffix HTTPHeaderMatchType = "suffix" +) + +// HTTPHeaderMatch specifies how a match should be done +// on a request's headers. +type HTTPHeaderMatch struct { + Match HTTPHeaderMatchType + Name string + Value string +} + +// HTTPPathMatchType specifies how path matching criteria +// should be applied to a request. 
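+//
+// As an illustrative sketch (the field values here are assumptions, not
+// defaults), a prefix-based path match inside an HTTPMatch could look like:
+//
+//	api.HTTPMatch{
+//		Path: api.HTTPPathMatch{Match: api.HTTPPathMatchPrefix, Value: "/v1"},
+//	}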
+type HTTPPathMatchType string
+
+const (
+	HTTPPathMatchExact             HTTPPathMatchType = "exact"
+	HTTPPathMatchPrefix            HTTPPathMatchType = "prefix"
+	HTTPPathMatchRegularExpression HTTPPathMatchType = "regex"
+)
+
+// HTTPPathMatch specifies how a match should be done
+// on a request's path.
+type HTTPPathMatch struct {
+	Match HTTPPathMatchType
+	Value string
+}
+
+// HTTPQueryMatchType specifies how query matching criteria
+// should be applied to a request.
+type HTTPQueryMatchType string
+
+const (
+	HTTPQueryMatchExact             HTTPQueryMatchType = "exact"
+	HTTPQueryMatchPresent           HTTPQueryMatchType = "present"
+	HTTPQueryMatchRegularExpression HTTPQueryMatchType = "regex"
+)
+
+// HTTPQueryMatch specifies how a match should be done
+// on a request's query parameters.
+type HTTPQueryMatch struct {
+	Match HTTPQueryMatchType
+	Name  string
+	Value string
+}
+
+// HTTPFilters specifies a list of filters used to modify a request
+// before it is routed to an upstream.
+type HTTPFilters struct {
+	Headers       []HTTPHeaderFilter
+	URLRewrite    *URLRewrite
+	RetryFilter   *RetryFilter
+	TimeoutFilter *TimeoutFilter
+	JWT           *JWTFilter
+}
+
+// HTTPResponseFilters specifies a list of filters used to modify a
+// response returned by an upstream.
+type HTTPResponseFilters struct {
+	Headers []HTTPHeaderFilter
+}
+
+// HTTPHeaderFilter specifies how HTTP headers should be modified.
+type HTTPHeaderFilter struct {
+	Add    map[string]string
+	Remove []string
+	Set    map[string]string
+}
+
+type URLRewrite struct {
+	Path string
+}
+
+type RetryFilter struct {
+	NumRetries            uint32
+	RetryOn               []string
+	RetryOnStatusCodes    []uint32
+	RetryOnConnectFailure bool
+}
+
+type TimeoutFilter struct {
+	RequestTimeout time.Duration
+	IdleTimeout    time.Duration
+}
+
+// JWTFilter specifies the JWT configuration for a route
+type JWTFilter struct {
+	Providers []*APIGatewayJWTProvider `json:",omitempty"`
+}
+
+// HTTPRouteRule specifies the routing rules used to determine what upstream
+// service an HTTP request is routed to.
+type HTTPRouteRule struct {
+	// Filters is a list of HTTP-based filters used to modify a request prior
+	// to routing it to the upstream service
+	Filters HTTPFilters
+	// ResponseFilters is a list of HTTP-based filters used to modify a response
+	// returned by the upstream service
+	ResponseFilters HTTPResponseFilters
+	// Matches specifies the matching criteria used in the routing table. If a
+	// request matches the given HTTPMatch configuration, then traffic is routed
+	// to services specified in the Services field.
+	Matches []HTTPMatch
+	// Services is a list of HTTP-based services to route to if the request matches
+	// the rules specified in the Matches field.
+	Services []HTTPService
+}
+
+// HTTPService is a service reference for HTTP-based routing rules
+type HTTPService struct {
+	Name string
+	// Weight is an arbitrary integer used in calculating how much
+	// traffic should be sent to the given service.
+	Weight int
+
+	// Filters is a list of HTTP-based filters used to modify a request prior
+	// to routing it to the upstream service
+	Filters HTTPFilters
+
+	// ResponseFilters is a list of HTTP-based filters used to modify the
+	// response returned from the upstream service
+	ResponseFilters HTTPResponseFilters
+
+	// Partition is the partition the config entry is associated with.
+	// Partitioning is a Consul Enterprise feature.
+	Partition string `json:",omitempty"`
+
+	// Namespace is the namespace the config entry is associated with.
+	// Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"` +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_sameness_group.go b/vendor/github.com/hashicorp/consul/api/config_entry_sameness_group.go new file mode 100644 index 0000000000..1217efe7d2 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_sameness_group.go @@ -0,0 +1,29 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +type SamenessGroupConfigEntry struct { + Kind string + Name string + Partition string `json:",omitempty"` + DefaultForFailover bool `json:",omitempty" alias:"default_for_failover"` + IncludeLocal bool `json:",omitempty" alias:"include_local"` + Members []SamenessGroupMember + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 +} + +type SamenessGroupMember struct { + Partition string `json:",omitempty"` + Peer string `json:",omitempty"` +} + +func (s *SamenessGroupConfigEntry) GetKind() string { return s.Kind } +func (s *SamenessGroupConfigEntry) GetName() string { return s.Name } +func (s *SamenessGroupConfigEntry) GetPartition() string { return s.Partition } +func (s *SamenessGroupConfigEntry) GetNamespace() string { return "" } +func (s *SamenessGroupConfigEntry) GetCreateIndex() uint64 { return s.CreateIndex } +func (s *SamenessGroupConfigEntry) GetModifyIndex() uint64 { return s.ModifyIndex } +func (s *SamenessGroupConfigEntry) GetMeta() map[string]string { return s.Meta } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_status.go b/vendor/github.com/hashicorp/consul/api/config_entry_status.go new file mode 100644 index 0000000000..997066f24f --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_status.go @@ -0,0 +1,358 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "fmt" + "time" + + "golang.org/x/exp/slices" +) + +// ResourceReference is a reference to a ConfigEntry +// with an optional reference to a subsection of that ConfigEntry +// that can be specified as SectionName +type ResourceReference struct { + // Kind is the kind of ConfigEntry that this resource refers to. + Kind string + // Name is the identifier for the ConfigEntry this resource refers to. + Name string + // SectionName is a generic subresource identifier that specifies + // a subset of the ConfigEntry to which this reference applies. Usage + // of this field should be up to the controller that leverages it. If + // unused, this should be blank. + SectionName string + + // Partition is the partition the config entry is associated with. + // Partitioning is a Consul Enterprise feature. + Partition string `json:",omitempty"` + + // Namespace is the namespace the config entry is associated with. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` +} + +// ConfigEntryStatus is used for propagating back asynchronously calculated +// messages from control loops to a user +type ConfigEntryStatus struct { + // Conditions is the set of condition objects associated with + // a ConfigEntry status. + Conditions []Condition +} + +// Condition is used for a single message and state associated +// with an object. For example, a ConfigEntry that references +// multiple other resources may have different statuses with +// respect to each of those resources. 
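+//
+// A minimal sketch of a populated condition (all values shown are
+// illustrative assumptions, not fixed defaults):
+//
+//	api.Condition{
+//		Type:    "Accepted",
+//		Status:  api.ConditionStatusTrue,
+//		Reason:  "Accepted",
+//		Message: "route accepted by the gateway",
+//	}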
+type Condition struct { + // Type is a value from a bounded set of types that an object might have + Type string + // Status is a value from a bounded set of statuses that an object might have + Status ConditionStatus + // Reason is a value from a bounded set of reasons for a given status + Reason string + // Message is a message that gives more detailed information about + // why a Condition has a given status and reason + Message string + // Resource is an optional reference to a resource for which this + // condition applies + Resource *ResourceReference + // LastTransitionTime is the time at which this Condition was created + LastTransitionTime *time.Time +} + +type ( + ConditionStatus string +) + +const ( + ConditionStatusTrue ConditionStatus = "True" + ConditionStatusFalse ConditionStatus = "False" + ConditionStatusUnknown ConditionStatus = "Unknown" +) + +// GatewayConditionType is a type of condition associated with a +// Gateway. This type should be used with the GatewayStatus.Conditions +// field. +type GatewayConditionType string + +// GatewayConditionReason defines the set of reasons that explain why a +// particular Gateway condition type has been raised. +type GatewayConditionReason string + +// the following are directly from the k8s spec +const ( + // This condition is true when the controller managing the Gateway is + // syntactically and semantically valid enough to produce some configuration + // in the underlying data plane. This does not indicate whether or not the + // configuration has been propagated to the data plane. + // + // Possible reasons for this condition to be True are: + // + // * "Accepted" + // + // Possible reasons for this condition to be False are: + // + // * InvalidCertificates + // + GatewayConditionAccepted GatewayConditionType = "Accepted" + + // This reason is used with the "Accepted" condition when the condition is + // True. + GatewayReasonAccepted GatewayConditionReason = "Accepted" + + // This reason is used with the "Accepted" condition when the gateway has multiple invalid + // certificates and cannot bind to any routes + GatewayReasonInvalidCertificates GatewayConditionReason = "InvalidCertificates" + + // This reason is used with the "Accepted" condition when the gateway has multiple invalid + // JWT providers and cannot bind to any routes + GatewayReasonInvalidJWTProviders GatewayConditionReason = "InvalidJWTProviders" + + // This condition indicates that the gateway was unable to resolve + // conflicting specification requirements for this Listener. If a + // Listener is conflicted, its network port should not be configured + // on any network elements. + // + // Possible reasons for this condition to be true are: + // + // * "RouteConflict" + // + // Possible reasons for this condition to be False are: + // + // * "NoConflict" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + GatewayConditionConflicted GatewayConditionType = "Conflicted" + // This reason is used with the "Conflicted" condition when the condition + // is False. 
+ GatewayReasonNoConflict GatewayConditionReason = "NoConflict" + // This reason is used with the "Conflicted" condition when the route is + // in a conflicted state, such as when a TCPListener attempts to bind to two routes + GatewayReasonRouteConflict GatewayConditionReason = "RouteConflict" + + // This condition indicates whether the controller was able to + // resolve all the object references for the Gateway. When setting this + // condition to False, a ResourceReference to the misconfigured Listener should + // be provided. + // + // Possible reasons for this condition to be true are: + // + // * "ResolvedRefs" + // + // Possible reasons for this condition to be False are: + // + // * "InvalidCertificateRef" + // * "InvalidRouteKinds" + // * "RefNotPermitted" + // + GatewayConditionResolvedRefs GatewayConditionType = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when the condition + // is true. + GatewayReasonResolvedRefs GatewayConditionReason = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when a + // Listener has a TLS configuration with at least one TLS CertificateRef + // that is invalid or does not exist. + // A CertificateRef is considered invalid when it refers to a nonexistent + // or unsupported resource or kind, or when the data within that resource + // is malformed. + // This reason must be used only when the reference is allowed, either by + // referencing an object in the same namespace as the Gateway, or when + // a cross-namespace reference has been explicitly allowed by a ReferenceGrant. + // If the reference is not allowed, the reason RefNotPermitted must be used + // instead. + GatewayListenerReasonInvalidCertificateRef GatewayConditionReason = "InvalidCertificateRef" + + // This reason is used with the "ResolvedRefs" condition when a + // Listener has a JWT configuration with at least one JWTProvider + // that is invalid or does not exist. + // A JWTProvider is considered invalid when it refers to a nonexistent + // or unsupported resource or kind, or when the data within that resource + // is malformed. 
+	GatewayListenerReasonInvalidJWTProviderRef GatewayConditionReason = "InvalidJWTProviderRef"
+)
+
+var validGatewayConditionReasonsMapping = map[GatewayConditionType]map[ConditionStatus][]GatewayConditionReason{
+	GatewayConditionAccepted: {
+		ConditionStatusTrue: {
+			GatewayReasonAccepted,
+		},
+		ConditionStatusFalse: {
+			GatewayReasonInvalidCertificates,
+			GatewayReasonInvalidJWTProviders,
+		},
+		ConditionStatusUnknown: {},
+	},
+	GatewayConditionConflicted: {
+		ConditionStatusTrue: {
+			GatewayReasonRouteConflict,
+		},
+		ConditionStatusFalse: {
+			GatewayReasonNoConflict,
+		},
+		ConditionStatusUnknown: {},
+	},
+	GatewayConditionResolvedRefs: {
+		ConditionStatusTrue: {
+			GatewayReasonResolvedRefs,
+		},
+		ConditionStatusFalse: {
+			GatewayListenerReasonInvalidCertificateRef,
+			GatewayListenerReasonInvalidJWTProviderRef,
+		},
+		ConditionStatusUnknown: {},
+	},
+}
+
+func ValidateGatewayConditionReason(name GatewayConditionType, status ConditionStatus, reason GatewayConditionReason) error {
+	if err := checkConditionStatus(status); err != nil {
+		return err
+	}
+
+	reasons, ok := validGatewayConditionReasonsMapping[name]
+	if !ok {
+		return fmt.Errorf("unrecognized GatewayConditionType %q", name)
+	}
+
+	reasonsForStatus, ok := reasons[status]
+	if !ok {
+		return fmt.Errorf("unrecognized ConditionStatus %q", status)
+	}
+
+	if !slices.Contains(reasonsForStatus, reason) {
+		return fmt.Errorf("gateway condition reason %q not allowed for gateway condition type %q with status %q", reason, name, status)
+	}
+	return nil
+}
+
+// RouteConditionType is a type of condition for a route.
+type RouteConditionType string
+
+// RouteConditionReason is a reason for a route condition.
+type RouteConditionReason string
+
+// The following statuses are taken from the K8s spec, with the exception of
+// "RouteReasonInvalidDiscoveryChain" and "NoUpstreamServicesTargeted".
+const (
+	// This condition indicates whether the route has been accepted or rejected
+	// by a Gateway, and why.
+	//
+	// Possible reasons for this condition to be true are:
+	//
+	// * "Accepted"
+	//
+	// Possible reasons for this condition to be False are:
+	//
+	// * "InvalidDiscoveryChain"
+	// * "NoUpstreamServicesTargeted"
+	//
+	// Controllers may raise this condition with other reasons,
+	// but should prefer to use the reasons listed above to improve
+	// interoperability.
+	RouteConditionAccepted RouteConditionType = "Accepted"
+
+	// This reason is used with the "Accepted" condition when the Route has been
+	// accepted by the Gateway.
+	RouteReasonAccepted RouteConditionReason = "Accepted"
+
+	// This reason is used with the "Accepted" condition when the route has an
+	// invalid discovery chain; this includes conditions like the protocol being
+	// invalid or the discovery chain failing to compile.
+	RouteReasonInvalidDiscoveryChain RouteConditionReason = "InvalidDiscoveryChain"
+
+	// This reason is used with the "Accepted" condition when the route targets
+	// no upstream services.
+	RouteReasonNoUpstreamServicesTargeted RouteConditionReason = "NoUpstreamServicesTargeted"
+)
+
+// the following statuses are custom to Consul
+const (
+	// This condition indicates whether the route was able to successfully bind the
+	// Listener on the gateway
+	// Possible reasons for this condition to be true are:
+	//
+	// * "Bound"
+	//
+	// Possible reasons for this condition to be false are:
+	//
+	// * "FailedToBind"
+	// * "GatewayNotFound"
+	//
+	RouteConditionBound RouteConditionType = "Bound"
+
+	// This reason is used with the "Bound" condition when the condition
+	// is true
+	RouteReasonBound RouteConditionReason = "Bound"
+
+	// This reason is used with the "Bound" condition when the route failed
+	// to bind to the gateway
+	RouteReasonFailedToBind RouteConditionReason = "FailedToBind"
+
+	// This reason is used with the "Bound" condition when the route fails
+	// to find the gateway
+	RouteReasonGatewayNotFound RouteConditionReason = "GatewayNotFound"
+
+	// This reason is used with the "Accepted" condition when the route references non-existent
+	// JWTProviders
+	RouteReasonJWTProvidersNotFound RouteConditionReason = "JWTProvidersNotFound"
+)
+
+var validRouteConditionReasonsMapping = map[RouteConditionType]map[ConditionStatus][]RouteConditionReason{
+	RouteConditionAccepted: {
+		ConditionStatusTrue: {
+			RouteReasonAccepted,
+		},
+		ConditionStatusFalse: {
+			RouteReasonInvalidDiscoveryChain,
+			RouteReasonNoUpstreamServicesTargeted,
+		},
+		ConditionStatusUnknown: {},
+	},
+	RouteConditionBound: {
+		ConditionStatusTrue: {
+			RouteReasonBound,
+		},
+		ConditionStatusFalse: {
+			RouteReasonGatewayNotFound,
+			RouteReasonFailedToBind,
+			RouteReasonJWTProvidersNotFound,
+		},
+		ConditionStatusUnknown: {},
+	},
+}
+
+func ValidateRouteConditionReason(name RouteConditionType, status ConditionStatus, reason RouteConditionReason) error {
+	if err := checkConditionStatus(status); err != nil {
+		return err
+	}
+
+	reasons, ok := validRouteConditionReasonsMapping[name]
+	if !ok {
+		return fmt.Errorf("unrecognized RouteConditionType %s", name)
+	}
+
+	reasonsForStatus, ok := reasons[status]
+	if !ok {
+		return fmt.Errorf("unrecognized ConditionStatus %s", status)
+	}
+
+	if !slices.Contains(reasonsForStatus, reason) {
+		return fmt.Errorf("route condition reason %s not allowed for route condition type %s with status %s", reason, name, status)
+	}
+
+	return nil
+}
+
+func checkConditionStatus(status ConditionStatus) error {
+	switch status {
+	case ConditionStatusTrue, ConditionStatusFalse, ConditionStatusUnknown:
+		return nil
+	default:
+		return fmt.Errorf("unrecognized condition status: %q", status)
+	}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/connect.go b/vendor/github.com/hashicorp/consul/api/connect.go
new file mode 100644
index 0000000000..77be00034d
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/connect.go
@@ -0,0 +1,18 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+// TelemetryCollectorName is the service name for the Consul Telemetry Collector
+const TelemetryCollectorName string = "consul-telemetry-collector"
+
+// Connect can be used to work with endpoints related to Connect, the
+// feature for securely connecting services within Consul.
+type Connect struct {
+	c *Client
+}
+
+// Connect returns a handle to the connect-related endpoints
+func (c *Client) Connect() *Connect {
+	return &Connect{c}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/connect_ca.go b/vendor/github.com/hashicorp/consul/api/connect_ca.go
new file mode 100644
index 0000000000..8a5c9f870e
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/connect_ca.go
@@ -0,0 +1,201 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+// CAConfig is the structure for the Connect CA configuration.
+type CAConfig struct {
+	// Provider is the CA provider implementation to use.
+	Provider string
+
+	// Configuration is arbitrary configuration for the provider. This
+	// should only contain primitive values and containers (such as lists
+	// and maps).
+	Config map[string]interface{}
+
+	// State is read-only data that the provider might have persisted for use
+	// after restart or leadership transition. For example this might include
+	// UUIDs of resources it has created. Setting this when writing a
+	// configuration is an error.
+	State map[string]string
+
+	// ForceWithoutCrossSigning indicates that the CA reconfiguration should go
+	// ahead even if the current CA is unable to cross sign certificates. This
+	// risks temporary connection failures during the rollout as new leaf
+	// certificates will be rejected by proxies that have not yet observed the
+	// new root cert, but is the only option if a CA that doesn't support cross
+	// signing needs to be reconfigured or migrated away from.
+	ForceWithoutCrossSigning bool
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// CommonCAProviderConfig is the common options available to all CA providers.
+type CommonCAProviderConfig struct {
+	LeafCertTTL      time.Duration
+	RootCertTTL      time.Duration
+	SkipValidate     bool
+	CSRMaxPerSecond  float32
+	CSRMaxConcurrent int
+}
+
+// ConsulCAProviderConfig is the config for the built-in Consul CA provider.
+type ConsulCAProviderConfig struct {
+	CommonCAProviderConfig `mapstructure:",squash"`
+
+	PrivateKey          string
+	RootCert            string
+	IntermediateCertTTL time.Duration
+}
+
+// ParseConsulCAConfig takes a raw config map and returns a parsed
+// ConsulCAProviderConfig.
+func ParseConsulCAConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) {
+	var config ConsulCAProviderConfig
+	decodeConf := &mapstructure.DecoderConfig{
+		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
+		Result:           &config,
+		WeaklyTypedInput: true,
+	}
+
+	decoder, err := mapstructure.NewDecoder(decodeConf)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := decoder.Decode(raw); err != nil {
+		return nil, fmt.Errorf("error decoding config: %s", err)
+	}
+
+	return &config, nil
+}
+
+// CARootList is the structure for the results of listing roots.
+type CARootList struct {
+	ActiveRootID string
+	TrustDomain  string
+	Roots        []*CARoot
+}
+
+// CARoot represents a root CA certificate that is trusted.
+type CARoot struct {
+	// ID is a globally unique ID (UUID) representing this CA root.
+ ID string + + // Name is a human-friendly name for this CA root. This value is + // opaque to Consul and is not used for anything internally. + Name string + + // RootCertPEM is the PEM-encoded public certificate. + RootCertPEM string `json:"RootCert"` + + // Active is true if this is the current active CA. This must only + // be true for exactly one CA. For any method that modifies roots in the + // state store, tests should be written to verify that multiple roots + // cannot be active. + Active bool + + CreateIndex uint64 + ModifyIndex uint64 +} + +// LeafCert is a certificate that has been issued by a Connect CA. +type LeafCert struct { + // SerialNumber is the unique serial number for this certificate. + // This is encoded in standard hex separated by :. + SerialNumber string + + // CertPEM and PrivateKeyPEM are the PEM-encoded certificate and private + // key for that cert, respectively. This should not be stored in the + // state store, but is present in the sign API response. + CertPEM string `json:",omitempty"` + PrivateKeyPEM string `json:",omitempty"` + + // Service is the name of the service for which the cert was issued. + // ServiceURI is the cert URI value. + Service string + ServiceURI string + + // ValidAfter and ValidBefore are the validity periods for the + // certificate. + ValidAfter time.Time + ValidBefore time.Time + + CreateIndex uint64 + ModifyIndex uint64 +} + +// CARoots queries the list of available roots. +func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/ca/roots") + r.setQueryOptions(q) + rtt, resp, err := h.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out CARootList + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// CAGetConfig returns the current CA configuration. +func (h *Connect) CAGetConfig(q *QueryOptions) (*CAConfig, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/ca/configuration") + r.setQueryOptions(q) + rtt, resp, err := h.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out CAConfig + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// CASetConfig sets the current CA configuration. +func (h *Connect) CASetConfig(conf *CAConfig, q *WriteOptions) (*WriteMeta, error) { + r := h.c.newRequest("PUT", "/v1/connect/ca/configuration") + r.setWriteOptions(q) + r.obj = conf + rtt, resp, err := h.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go new file mode 100644 index 0000000000..e91c03e8b7 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/connect_intention.go @@ -0,0 +1,461 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"time"
+)
+
+// Intention defines an intention for the Connect Service Graph. This defines
+// the allowed or denied behavior of a connection between two services using
+// Connect.
+type Intention struct {
+	// ID is the UUID-based ID for the intention, always generated by Consul.
+	ID string `json:",omitempty"`
+
+	// Description is a human-friendly description of this intention.
+	// It is opaque to Consul and is only stored and transferred in API
+	// requests.
+	Description string `json:",omitempty"`
+
+	// SourceNS, SourceName are the namespace and name, respectively, of
+	// the source service. Either of these may be the wildcard "*", but only
+	// the full value can be a wildcard. Partial wildcards are not allowed.
+	// The source may also be a non-Consul service, as specified by SourceType.
+	//
+	// DestinationNS, DestinationName is the same, but for the destination
+	// service. The same rules apply. The destination is always a Consul
+	// service.
+	SourceNS, SourceName           string
+	DestinationNS, DestinationName string
+
+	// SourcePartition and DestinationPartition cannot be wildcards "*" and
+	// are not compatible with legacy intentions.
+	SourcePartition      string `json:",omitempty"`
+	DestinationPartition string `json:",omitempty"`
+
+	// SourcePeer cannot be a wildcard "*" and is not compatible with legacy
+	// intentions. Cannot be used with SourcePartition, as both represent the
+	// same level of tenancy (partition is local to cluster, peer is remote).
+	SourcePeer string `json:",omitempty"`
+
+	// SourceSamenessGroup cannot be a wildcard "*" and
+	// is not compatible with legacy intentions.
+	SourceSamenessGroup string `json:",omitempty"`
+
+	// SourceType is the type of the value for the source.
+	SourceType IntentionSourceType
+
+	// Action is whether this is an allowlist or denylist intention.
+	Action IntentionAction `json:",omitempty"`
+
+	// Permissions is the list of additional L7 attributes that extend the
+	// intention definition.
+	//
+	// NOTE: This field is not editable unless editing the underlying
+	// service-intentions config entry directly.
+	Permissions []*IntentionPermission `json:",omitempty"`
+
+	// DefaultAddr is not used.
+	// Deprecated: DefaultAddr is not used and may be removed in a future version.
+	DefaultAddr string `json:",omitempty"`
+	// DefaultPort is not used.
+	// Deprecated: DefaultPort is not used and may be removed in a future version.
+	DefaultPort int `json:",omitempty"`
+
+	// Meta is arbitrary metadata associated with the intention. This is
+	// opaque to Consul but is served in API responses.
+	Meta map[string]string `json:",omitempty"`
+
+	// Precedence is the order that the intention will be applied, with
+	// larger numbers being applied first. This is a read-only field; on
+	// any intention update it is updated.
+	Precedence int
+
+	// CreatedAt and UpdatedAt keep track of when this record was created
+	// or modified.
+	CreatedAt, UpdatedAt time.Time
+
+	// Hash of the contents of the intention.
+	//
+	// This is needed mainly for replication purposes. When replicating from
+	// one DC to another, keeping the content Hash will allow us to detect
+	// content changes more efficiently than checking every single field.
+	Hash []byte `json:",omitempty"`
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// String returns human-friendly output describing this intention.
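+// For example, an allow intention with no L7 permissions between the
+// illustrative services "web" and "db" renders as:
+//
+//	web => db (allow)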
+func (i *Intention) String() string {
+	var detail string
+	switch n := len(i.Permissions); n {
+	case 0:
+		detail = string(i.Action)
+	case 1:
+		detail = "1 permission"
+	default:
+		detail = fmt.Sprintf("%d permissions", len(i.Permissions))
+	}
+
+	return fmt.Sprintf("%s => %s (%s)",
+		i.SourceString(),
+		i.DestinationString(),
+		detail)
+}
+
+// SourceString returns the namespace/name format for the source, or
+// just "name" if the namespace is the default namespace.
+func (i *Intention) SourceString() string {
+	return i.partString(i.SourceNS, i.SourceName)
+}
+
+// DestinationString returns the namespace/name format for the destination, or
+// just "name" if the namespace is the default namespace.
+func (i *Intention) DestinationString() string {
+	return i.partString(i.DestinationNS, i.DestinationName)
+}
+
+func (i *Intention) partString(ns, n string) string {
+	// For now we omit the default namespace from the output. In the future
+	// we might want to look at this and show this in a multi-namespace world.
+	if ns != "" && ns != IntentionDefaultNamespace {
+		n = ns + "/" + n
+	}
+
+	return n
+}
+
+// IntentionDefaultNamespace is the default namespace value.
+const IntentionDefaultNamespace = "default"
+
+// IntentionAction is the action that the intention represents. This
+// can be "allow" or "deny" to allowlist or denylist intentions.
+type IntentionAction string
+
+const (
+	IntentionActionAllow IntentionAction = "allow"
+	IntentionActionDeny  IntentionAction = "deny"
+)
+
+// IntentionSourceType is the type of the source within an intention.
+type IntentionSourceType string
+
+const (
+	// IntentionSourceConsul is a service within the Consul catalog.
+	IntentionSourceConsul IntentionSourceType = "consul"
+)
+
+// IntentionMatch are the arguments for the intention match API.
+type IntentionMatch struct {
+	By    IntentionMatchType
+	Names []string
+}
+
+// IntentionMatchType is the target for a match request. For example,
+// matching by source will look for all intentions that match the given
+// source value.
+type IntentionMatchType string
+
+const (
+	IntentionMatchSource      IntentionMatchType = "source"
+	IntentionMatchDestination IntentionMatchType = "destination"
+)
+
+// IntentionCheck are the arguments for the intention check API. For
+// more documentation see the IntentionCheck function.
+type IntentionCheck struct {
+	// Source and Destination are the source and destination values to
+	// check. The destination is always a Consul service, but the source
+	// may be other values as defined by the SourceType.
+	Source, Destination string
+
+	// SourceType is the type of the value for the source.
+	SourceType IntentionSourceType
+}
+
+// Intentions returns the list of intentions.
+func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/connect/intentions")
+	r.setQueryOptions(q)
+	rtt, resp, err := h.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*Intention
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// IntentionGetExact retrieves a single intention by its unique name instead of
+// its ID.
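+//
+// A minimal usage sketch (assuming a configured *api.Client named client and
+// illustrative service names):
+//
+//	ixn, _, err := client.Connect().IntentionGetExact("web", "db", nil)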
+func (h *Connect) IntentionGetExact(source, destination string, q *QueryOptions) (*Intention, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions/exact") + r.setQueryOptions(q) + r.params.Set("source", source) + r.params.Set("destination", destination) + rtt, resp, err := h.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == 404 { + return nil, qm, nil + } else if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + return nil, nil, fmt.Errorf( + "Unexpected response %d: %s", resp.StatusCode, buf.String()) + } + + var out Intention + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// IntentionGet retrieves a single intention. +// +// Deprecated: use IntentionGetExact instead +func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions/"+id) + r.setQueryOptions(q) + rtt, resp, err := h.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == 404 { + return nil, qm, nil + } else if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + return nil, nil, fmt.Errorf( + "Unexpected response %d: %s", resp.StatusCode, buf.String()) + } + + var out Intention + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// IntentionDeleteExact deletes a single intention by its unique name instead of its ID. +func (h *Connect) IntentionDeleteExact(source, destination string, q *WriteOptions) (*WriteMeta, error) { + r := h.c.newRequest("DELETE", "/v1/connect/intentions/exact") + r.setWriteOptions(q) + r.params.Set("source", source) + r.params.Set("destination", destination) + + rtt, resp, err := h.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + qm := &WriteMeta{} + qm.RequestTime = rtt + + return qm, nil +} + +// IntentionDelete deletes a single intention. +// +// Deprecated: use IntentionDeleteExact instead +func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error) { + r := h.c.newRequest("DELETE", "/v1/connect/intentions/"+id) + r.setWriteOptions(q) + rtt, resp, err := h.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + qm := &WriteMeta{} + qm.RequestTime = rtt + + return qm, nil +} + +// IntentionMatch returns the list of intentions that match a given source +// or destination. The returned intentions are ordered by precedence where +// result[0] is the highest precedence (if that matches, then that rule overrides +// all other rules). +// +// Matching can be done for multiple names at the same time. The resulting +// map is keyed by the given names. Casing is preserved. 
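+//
+// A minimal usage sketch (assuming a configured *api.Client named client):
+//
+//	matches, _, err := client.Connect().IntentionMatch(&api.IntentionMatch{
+//		By:    api.IntentionMatchSource,
+//		Names: []string{"web"},
+//	}, nil)
+//
+// matches["web"] would then hold the intentions matching the illustrative
+// source service "web", ordered by precedence.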
+func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[string][]*Intention, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/connect/intentions/match")
+	r.setQueryOptions(q)
+	r.params.Set("by", string(args.By))
+	for _, name := range args.Names {
+		r.params.Add("name", name)
+	}
+	rtt, resp, err := h.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out map[string][]*Intention
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// IntentionCheck returns whether a given source/destination would be allowed
+// or not given the current set of intentions and the configuration of Consul.
+func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/connect/intentions/check")
+	r.setQueryOptions(q)
+	r.params.Set("source", args.Source)
+	r.params.Set("destination", args.Destination)
+	if args.SourceType != "" {
+		r.params.Set("source-type", string(args.SourceType))
+	}
+	rtt, resp, err := h.c.doRequest(r)
+	if err != nil {
+		return false, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return false, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out struct{ Allowed bool }
+	if err := decodeBody(resp, &out); err != nil {
+		return false, nil, err
+	}
+	return out.Allowed, qm, nil
+}
+
+// IntentionUpsert will create or update an intention. The Source & Destination
+// parameters in the structure must be non-empty. The ID must be empty.
+func (c *Connect) IntentionUpsert(ixn *Intention, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/connect/intentions/exact")
+	r.setWriteOptions(q)
+	r.params.Set("source", maybePrefixNamespaceAndPartition(ixn.SourcePartition, ixn.SourceNS, ixn.SourceName))
+	r.params.Set("destination", maybePrefixNamespaceAndPartition(ixn.DestinationPartition, ixn.DestinationNS, ixn.DestinationName))
+	r.obj = ixn
+	rtt, resp, err := c.c.doRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, err
+	}
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+	return wm, nil
+}
+
+func maybePrefixNamespaceAndPartition(part, ns, name string) string {
+	switch {
+	case part == "" && ns == "":
+		return name
+	case part == "" && ns != "":
+		return ns + "/" + name
+	case part != "" && ns == "":
+		return part + "/" + IntentionDefaultNamespace + "/" + name
+	default:
+		return part + "/" + ns + "/" + name
+	}
+}
+
+// IntentionCreate will create a new intention. The ID in the given
+// structure must be empty and a generated ID will be returned on
+// success.
+// +// Deprecated: use IntentionUpsert instead +func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *WriteMeta, error) { + r := c.c.newRequest("POST", "/v1/connect/intentions") + r.setWriteOptions(q) + r.obj = ixn + rtt, resp, err := c.c.doRequest(r) + if err != nil { + return "", nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return "", nil, err + } + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// IntentionUpdate will update an existing intention. The ID in the given +// structure must be non-empty. +// +// Deprecated: use IntentionUpsert instead +func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/connect/intentions/"+ixn.ID) + r.setWriteOptions(q) + r.obj = ixn + rtt, resp, err := c.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go new file mode 100644 index 0000000000..b0269adaef --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/coordinate.go @@ -0,0 +1,122 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "github.com/hashicorp/serf/coordinate" +) + +// CoordinateEntry represents a node and its associated network coordinate. +type CoordinateEntry struct { + Node string + Segment string + Partition string `json:",omitempty"` + Coord *coordinate.Coordinate +} + +// CoordinateDatacenterMap has the coordinates for servers in a given datacenter +// and area. Network coordinates are only compatible within the same area. +type CoordinateDatacenterMap struct { + Datacenter string + AreaID string + Coordinates []CoordinateEntry +} + +// Coordinate can be used to query the coordinate endpoints +type Coordinate struct { + c *Client +} + +// Coordinate returns a handle to the coordinate endpoints +func (c *Client) Coordinate() *Coordinate { + return &Coordinate{c} +} + +// Datacenters is used to return the coordinates of all the servers in the WAN +// pool. +func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) { + r := c.c.newRequest("GET", "/v1/coordinate/datacenters") + _, resp, err := c.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + var out []*CoordinateDatacenterMap + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to return the coordinates of all the nodes in the LAN pool. 
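+//
+// A minimal usage sketch (assuming a configured *api.Client named client):
+//
+//	entries, _, err := client.Coordinate().Nodes(nil)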
+func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/nodes")
+	r.setQueryOptions(q)
+	rtt, resp, err := c.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CoordinateEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Update inserts or updates the LAN coordinate of a node.
+func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/coordinate/update")
+	r.setWriteOptions(q)
+	r.obj = coord
+	rtt, resp, err := c.c.doRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, err
+	}
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+// Node is used to return the coordinates of a single node in the LAN pool.
+func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := c.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CoordinateEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/debug.go b/vendor/github.com/hashicorp/consul/api/debug.go
new file mode 100644
index 0000000000..e6b5dc52da
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/debug.go
@@ -0,0 +1,141 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// Debug can be used to query the /debug/pprof endpoints to gather
+// profiling information about the target agent.
+//
+// The agent must have enable_debug set to true for profiling to be enabled
+// and for these endpoints to function.
+type Debug struct {
+	c *Client
+}
+
+// Debug returns a handle that exposes the internal debug endpoints.
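+//
+// A minimal usage sketch (assuming a configured *api.Client named client
+// and an agent running with enable_debug set to true):
+//
+//	heap, err := client.Debug().Heap()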
+func (c *Client) Debug() *Debug { + return &Debug{c} +} + +// Heap returns a pprof heap dump +func (d *Debug) Heap() ([]byte, error) { + r := d.c.newRequest("GET", "/debug/pprof/heap") + _, resp, err := d.c.doRequest(r) + if err != nil { + return nil, fmt.Errorf("error making request: %s", err) + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + // We return a raw response because we're just passing through a response + // from the pprof handlers + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error decoding body: %s", err) + } + + return body, nil +} + +// Profile returns a pprof CPU profile for the specified number of seconds +func (d *Debug) Profile(seconds int) ([]byte, error) { + r := d.c.newRequest("GET", "/debug/pprof/profile") + + // Capture a profile for the specified number of seconds + r.params.Set("seconds", strconv.Itoa(seconds)) + + _, resp, err := d.c.doRequest(r) + if err != nil { + return nil, fmt.Errorf("error making request: %s", err) + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + // We return a raw response because we're just passing through a response + // from the pprof handlers + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error decoding body: %s", err) + } + + return body, nil +} + +// PProf returns a pprof profile for the specified number of seconds. The caller +// is responsible for closing the returned io.ReadCloser once all bytes are read. +func (d *Debug) PProf(ctx context.Context, name string, seconds int) (io.ReadCloser, error) { + r := d.c.newRequest("GET", "/debug/pprof/"+name) + r.ctx = ctx + + // Capture a profile for the specified number of seconds + r.params.Set("seconds", strconv.Itoa(seconds)) + + _, resp, err := d.c.doRequest(r) + if err != nil { + return nil, fmt.Errorf("error making request: %s", err) + } + if err := requireOK(resp); err != nil { + return nil, err + } + return resp.Body, nil +} + +// Trace returns an execution trace +func (d *Debug) Trace(seconds int) ([]byte, error) { + r := d.c.newRequest("GET", "/debug/pprof/trace") + + // Capture a trace for the specified number of seconds + r.params.Set("seconds", strconv.Itoa(seconds)) + + _, resp, err := d.c.doRequest(r) + if err != nil { + return nil, fmt.Errorf("error making request: %s", err) + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + // We return a raw response because we're just passing through a response + // from the pprof handlers + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error decoding body: %s", err) + } + + return body, nil +} + +// Goroutine returns a pprof goroutine profile +func (d *Debug) Goroutine() ([]byte, error) { + r := d.c.newRequest("GET", "/debug/pprof/goroutine") + + _, resp, err := d.c.doRequest(r) + if err != nil { + return nil, fmt.Errorf("error making request: %s", err) + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + // We return a raw response because we're just passing through a response + // from the pprof handlers + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error decoding body: %s", err) + } + + return body, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/discovery_chain.go b/vendor/github.com/hashicorp/consul/api/discovery_chain.go new file mode 100644 index 
0000000000..4b6260cf34 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/discovery_chain.go @@ -0,0 +1,283 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "encoding/json" + "fmt" + "time" +) + +// DiscoveryChain can be used to query the discovery-chain endpoints +type DiscoveryChain struct { + c *Client +} + +// DiscoveryChain returns a handle to the discovery-chain endpoints +func (c *Client) DiscoveryChain() *DiscoveryChain { + return &DiscoveryChain{c} +} + +func (d *DiscoveryChain) Get(name string, opts *DiscoveryChainOptions, q *QueryOptions) (*DiscoveryChainResponse, *QueryMeta, error) { + if name == "" { + return nil, nil, fmt.Errorf("Name parameter must not be empty") + } + + method := "GET" + if opts != nil && opts.requiresPOST() { + method = "POST" + } + + r := d.c.newRequest(method, fmt.Sprintf("/v1/discovery-chain/%s", name)) + r.setQueryOptions(q) + + if opts != nil { + if opts.EvaluateInDatacenter != "" { + r.params.Set("compile-dc", opts.EvaluateInDatacenter) + } + } + + if method == "POST" { + r.obj = opts + } + rtt, resp, err := d.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out DiscoveryChainResponse + + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +type DiscoveryChainOptions struct { + EvaluateInDatacenter string `json:"-"` + + // OverrideMeshGateway allows for the mesh gateway setting to be overridden + // for any resolver in the compiled chain. + OverrideMeshGateway MeshGatewayConfig `json:",omitempty"` + + // OverrideProtocol allows for the final protocol for the chain to be + // altered. + // + // - If the chain ordinarily would be TCP and an L7 protocol is passed here + // the chain will not include Routers or Splitters. + // + // - If the chain ordinarily would be L7 and TCP is passed here the chain + // will not include Routers or Splitters. + OverrideProtocol string `json:",omitempty"` + + // OverrideConnectTimeout allows for the ConnectTimeout setting to be + // overridden for any resolver in the compiled chain. + OverrideConnectTimeout time.Duration `json:",omitempty"` +} + +func (o *DiscoveryChainOptions) requiresPOST() bool { + if o == nil { + return false + } + return o.OverrideMeshGateway.Mode != "" || + o.OverrideProtocol != "" || + o.OverrideConnectTimeout != 0 +} + +type DiscoveryChainResponse struct { + Chain *CompiledDiscoveryChain +} + +type CompiledDiscoveryChain struct { + ServiceName string + Namespace string + Datacenter string + + // CustomizationHash is a unique hash of any data that affects the + // compilation of the discovery chain other than config entries or the + // name/namespace/datacenter evaluation criteria. + // + // If set, this value should be used to prefix/suffix any generated load + // balancer data plane objects to avoid sharing customized and + // non-customized versions. + CustomizationHash string + + // Default indicates if this discovery chain is based on no + // service-resolver, service-splitter, or service-router config entries. + Default bool + + // Protocol is the overall protocol shared by everything in the chain. + Protocol string + + // ServiceMeta is the metadata from the underlying service-defaults config + // entry for the service named ServiceName. 
+	ServiceMeta map[string]string
+
+	// StartNode is the first key into the Nodes map that should be followed
+	// when walking the discovery chain.
+	StartNode string
+
+	// Nodes contains all nodes available for traversal in the chain keyed by a
+	// unique name. You can walk this by starting with StartNode.
+	//
+	// NOTE: The names should be treated as opaque values and are only
+	// guaranteed to be consistent within a single compilation.
+	Nodes map[string]*DiscoveryGraphNode
+
+	// Targets is a list of all targets used in this chain.
+	//
+	// NOTE: The names should be treated as opaque values and are only
+	// guaranteed to be consistent within a single compilation.
+	Targets map[string]*DiscoveryTarget
+}
+
+const (
+	DiscoveryGraphNodeTypeRouter   = "router"
+	DiscoveryGraphNodeTypeSplitter = "splitter"
+	DiscoveryGraphNodeTypeResolver = "resolver"
+)
+
+// DiscoveryGraphNode is a single node in the compiled discovery chain.
+type DiscoveryGraphNode struct {
+	Type string
+	Name string // this is NOT necessarily a service
+
+	// fields for Type==router
+	Routes []*DiscoveryRoute
+
+	// fields for Type==splitter
+	Splits []*DiscoverySplit
+
+	// fields for Type==resolver
+	Resolver *DiscoveryResolver
+
+	// shared by Type==resolver || Type==splitter
+	LoadBalancer *LoadBalancer `json:",omitempty"`
+}
+
+// compiled form of ServiceRoute
+type DiscoveryRoute struct {
+	Definition *ServiceRoute
+	NextNode   string
+}
+
+// compiled form of ServiceSplit
+type DiscoverySplit struct {
+	Weight   float32
+	NextNode string
+}
+
+// compiled form of ServiceResolverConfigEntry
+type DiscoveryResolver struct {
+	Default        bool
+	ConnectTimeout time.Duration
+	Target         string
+	Failover       *DiscoveryFailover
+}
+
+func (r *DiscoveryResolver) MarshalJSON() ([]byte, error) {
+	type Alias DiscoveryResolver
+	exported := &struct {
+		ConnectTimeout string `json:",omitempty"`
+		*Alias
+	}{
+		ConnectTimeout: r.ConnectTimeout.String(),
+		Alias:          (*Alias)(r),
+	}
+	if r.ConnectTimeout == 0 {
+		exported.ConnectTimeout = ""
+	}
+
+	return json.Marshal(exported)
+}
+
+func (r *DiscoveryResolver) UnmarshalJSON(data []byte) error {
+	type Alias DiscoveryResolver
+	aux := &struct {
+		ConnectTimeout string
+		*Alias
+	}{
+		Alias: (*Alias)(r),
+	}
+	if err := json.Unmarshal(data, &aux); err != nil {
+		return err
+	}
+	var err error
+	if aux.ConnectTimeout != "" {
+		if r.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// compiled form of ServiceResolverFailover
+type DiscoveryFailover struct {
+	Targets []string
+	Policy  ServiceResolverFailoverPolicy `json:",omitempty"`
+}
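Editor's note: the options and response types above are easiest to see in use. The following is a minimal sketch (not part of the vendored patch) that compiles the chain for a hypothetical service named "web" against a local agent; it assumes the canonical import path github.com/hashicorp/consul/api, which the vendored tree above mirrors under github.com. A non-empty OverrideProtocol makes requiresPOST() return true, so Get issues a POST compile request.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Assumes an agent reachable at the default address (127.0.0.1:8500).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// A non-empty override forces the compile request onto the POST path.
	opts := &api.DiscoveryChainOptions{OverrideProtocol: "tcp"}
	resp, _, err := client.DiscoveryChain().Get("web", opts, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Walk the compiled chain starting from StartNode.
	fmt.Println("protocol:", resp.Chain.Protocol)
	fmt.Println("start node:", resp.Chain.StartNode)
}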
+// DiscoveryTarget represents all of the inputs necessary to use a resolver
+// config entry to execute a catalog query to generate a list of service
+// instances during discovery.
+type DiscoveryTarget struct {
+	ID string
+
+	Service       string
+	ServiceSubset string
+	Namespace     string
+	Datacenter    string
+
+	MeshGateway    MeshGatewayConfig
+	Subset         ServiceResolverSubset
+	ConnectTimeout time.Duration
+	External       bool
+	SNI            string
+	Name           string
+}
+
+func (t *DiscoveryTarget) MarshalJSON() ([]byte, error) {
+	type Alias DiscoveryTarget
+	exported := &struct {
+		ConnectTimeout string `json:",omitempty"`
+		*Alias
+	}{
+		ConnectTimeout: t.ConnectTimeout.String(),
+		Alias:          (*Alias)(t),
+	}
+	if t.ConnectTimeout == 0 {
+		exported.ConnectTimeout = ""
+	}
+
+	return json.Marshal(exported)
+}
+
+func (t *DiscoveryTarget) UnmarshalJSON(data []byte) error {
+	type Alias DiscoveryTarget
+	aux := &struct {
+		ConnectTimeout string
+		*Alias
+	}{
+		Alias: (*Alias)(t),
+	}
+	if err := json.Unmarshal(data, &aux); err != nil {
+		return err
+	}
+	var err error
+	if aux.ConnectTimeout != "" {
+		if t.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go
new file mode 100644
index 0000000000..efba89d3b5
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/event.go
@@ -0,0 +1,114 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"bytes"
+	"strconv"
+)
+
+// Event can be used to query the Event endpoints
+type Event struct {
+	c *Client
+}
+
+// UserEvent represents an event that was fired by the user
+type UserEvent struct {
+	ID            string
+	Name          string
+	Payload       []byte
+	NodeFilter    string
+	ServiceFilter string
+	TagFilter     string
+	Version       int
+	LTime         uint64
+}
+
+// Event returns a handle to the event endpoints
+func (c *Client) Event() *Event {
+	return &Event{c}
+}
+
+// Fire is used to fire a new user event. Only the Name, Payload and Filters
+// are respected. This returns the ID or an associated error. Cross DC requests
+// are supported.
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
+	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
+	r.setWriteOptions(q)
+	if params.NodeFilter != "" {
+		r.params.Set("node", params.NodeFilter)
+	}
+	if params.ServiceFilter != "" {
+		r.params.Set("service", params.ServiceFilter)
+	}
+	if params.TagFilter != "" {
+		r.params.Set("tag", params.TagFilter)
+	}
+	if params.Payload != nil {
+		r.body = bytes.NewReader(params.Payload)
+	}
+	r.header.Set("Content-Type", "application/octet-stream")
+
+	rtt, resp, err := e.c.doRequest(r)
+	if err != nil {
+		return "", nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return "", nil, err
+	}
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out UserEvent
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
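Editor's note: a short sketch of the event API above (not part of the patch); the "deploy" event name and payload are made up, and a default local agent plus the canonical github.com/hashicorp/consul/api import path are assumed.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ev := client.Event()

	// Fire respects only Name, Payload and the filter fields.
	id, _, err := ev.Fire(&api.UserEvent{Name: "deploy", Payload: []byte("v1.2.3")}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("fired:", id)

	// List returns the agent's buffered events, optionally filtered by name
	// (List itself is defined just below).
	events, _, err := ev.List("deploy", nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range events {
		fmt.Printf("%s %s %s\n", e.ID, e.Name, e.Payload)
	}
}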
+// List is used to get the most recent events an agent has received.
+// This list can be optionally filtered by the name. This endpoint supports
+// quasi-blocking queries. The index is not monotonic, nor does it provide
+// LastContact or KnownLeader.
+func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
+	r := e.c.newRequest("GET", "/v1/event/list")
+	r.setQueryOptions(q)
+	if name != "" {
+		r.params.Set("name", name)
+	}
+	rtt, resp, err := e.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*UserEvent
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// IDToIndex is a bit of a hack. This simulates the index generation to
+// convert an event ID into a WaitIndex.
+func (e *Event) IDToIndex(uuid string) uint64 {
+	lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
+	upper := uuid[19:23] + uuid[24:36]
+	lowVal, err := strconv.ParseUint(lower, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + lower)
+	}
+	highVal, err := strconv.ParseUint(upper, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + upper)
+	}
+	return lowVal ^ highVal
+}
diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go
new file mode 100644
index 0000000000..a023002046
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/health.go
@@ -0,0 +1,398 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+)
+
+const (
+	// HealthAny is special, and is used as a wild card,
+	// not as a specific state.
+	HealthAny      = "any"
+	HealthPassing  = "passing"
+	HealthWarning  = "warning"
+	HealthCritical = "critical"
+	HealthMaint    = "maintenance"
+)
+
+const (
+	serviceHealth = "service"
+	connectHealth = "connect"
+	ingressHealth = "ingress"
+)
+
+const (
+	// NodeMaint is the special key set by a node in maintenance mode.
+	NodeMaint = "_node_maintenance"
+
+	// ServiceMaintPrefix is the prefix for a service in maintenance mode.
+	ServiceMaintPrefix = "_service_maintenance:"
+)
+
+// HealthCheck is used to represent a single check
+type HealthCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+	ServiceTags []string
+	Type        string
+	Namespace   string `json:",omitempty"`
+	Partition   string `json:",omitempty"`
+	ExposedPort int
+	PeerName    string `json:",omitempty"`
+
+	Definition HealthCheckDefinition
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// HealthCheckDefinition is used to store the details about
+// a health check's execution.
+type HealthCheckDefinition struct {
+	HTTP                                   string
+	Header                                 map[string][]string
+	Method                                 string
+	Body                                   string
+	TLSServerName                          string
+	TLSSkipVerify                          bool
+	TCP                                    string
+	TCPUseTLS                              bool
+	UDP                                    string
+	GRPC                                   string
+	OSService                              string
+	GRPCUseTLS                             bool
+	IntervalDuration                       time.Duration `json:"-"`
+	TimeoutDuration                        time.Duration `json:"-"`
+	DeregisterCriticalServiceAfterDuration time.Duration `json:"-"`
+
+	// DEPRECATED in Consul 1.4.1. Use the above time.Duration fields instead.
+ Interval ReadableDuration + Timeout ReadableDuration + DeregisterCriticalServiceAfter ReadableDuration +} + +func (d *HealthCheckDefinition) MarshalJSON() ([]byte, error) { + type Alias HealthCheckDefinition + out := &struct { + Interval string + Timeout string + DeregisterCriticalServiceAfter string + *Alias + }{ + Interval: d.Interval.String(), + Timeout: d.Timeout.String(), + DeregisterCriticalServiceAfter: d.DeregisterCriticalServiceAfter.String(), + Alias: (*Alias)(d), + } + + if d.IntervalDuration != 0 { + out.Interval = d.IntervalDuration.String() + } else if d.Interval != 0 { + out.Interval = d.Interval.String() + } + if d.TimeoutDuration != 0 { + out.Timeout = d.TimeoutDuration.String() + } else if d.Timeout != 0 { + out.Timeout = d.Timeout.String() + } + if d.DeregisterCriticalServiceAfterDuration != 0 { + out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfterDuration.String() + } else if d.DeregisterCriticalServiceAfter != 0 { + out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfter.String() + } + + return json.Marshal(out) +} + +func (t *HealthCheckDefinition) UnmarshalJSON(data []byte) (err error) { + type Alias HealthCheckDefinition + aux := &struct { + IntervalDuration interface{} + TimeoutDuration interface{} + DeregisterCriticalServiceAfterDuration interface{} + *Alias + }{ + Alias: (*Alias)(t), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + // Parse the values into both the time.Duration and old ReadableDuration fields. + + if aux.IntervalDuration == nil { + t.IntervalDuration = time.Duration(t.Interval) + } else { + switch v := aux.IntervalDuration.(type) { + case string: + if t.IntervalDuration, err = time.ParseDuration(v); err != nil { + return err + } + case float64: + t.IntervalDuration = time.Duration(v) + } + t.Interval = ReadableDuration(t.IntervalDuration) + } + + if aux.TimeoutDuration == nil { + t.TimeoutDuration = time.Duration(t.Timeout) + } else { + switch v := aux.TimeoutDuration.(type) { + case string: + if t.TimeoutDuration, err = time.ParseDuration(v); err != nil { + return err + } + case float64: + t.TimeoutDuration = time.Duration(v) + } + t.Timeout = ReadableDuration(t.TimeoutDuration) + } + if aux.DeregisterCriticalServiceAfterDuration == nil { + t.DeregisterCriticalServiceAfterDuration = time.Duration(t.DeregisterCriticalServiceAfter) + } else { + switch v := aux.DeregisterCriticalServiceAfterDuration.(type) { + case string: + if t.DeregisterCriticalServiceAfterDuration, err = time.ParseDuration(v); err != nil { + return err + } + case float64: + t.DeregisterCriticalServiceAfterDuration = time.Duration(v) + } + t.DeregisterCriticalServiceAfter = ReadableDuration(t.DeregisterCriticalServiceAfterDuration) + } + + return nil +} + +// HealthChecks is a collection of HealthCheck structs. +type HealthChecks []*HealthCheck + +// AggregatedStatus returns the "best" status for the list of health checks. 
+// Because a given entry may have many service and node-level health checks
+// attached, this function determines the best representative of the status as
+// a single string using the following heuristic:
+//
+// maintenance > critical > warning > passing
+func (c HealthChecks) AggregatedStatus() string {
+	var passing, warning, critical, maintenance bool
+	for _, check := range c {
+		id := check.CheckID
+		if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
+			maintenance = true
+			continue
+		}
+
+		switch check.Status {
+		case HealthPassing:
+			passing = true
+		case HealthWarning:
+			warning = true
+		case HealthCritical:
+			critical = true
+		default:
+			return ""
+		}
+	}
+
+	switch {
+	case maintenance:
+		return HealthMaint
+	case critical:
+		return HealthCritical
+	case warning:
+		return HealthWarning
+	case passing:
+		return HealthPassing
+	default:
+		return HealthPassing
+	}
+}
+
+// ServiceEntry is used for the health service endpoint
+type ServiceEntry struct {
+	Node    *Node
+	Service *AgentService
+	Checks  HealthChecks
+}
+
+// Health can be used to query the Health endpoints
+type Health struct {
+	c *Client
+}
+
+// Health returns a handle to the health endpoints
+func (c *Client) Health() *Health {
+	return &Health{c}
+}
+
+// Node is used to query for checks belonging to a given node
+func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := h.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Checks is used to return the checks associated with a service
+func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/checks/"+service)
+	r.setQueryOptions(q)
+	rtt, resp, err := h.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query health information along with service info
+// for a given service. It can optionally do server-side filtering on a tag
+// or nodes with passing health checks only.
+func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	var tags []string
+	if tag != "" {
+		tags = []string{tag}
+	}
+	return h.service(service, tags, passingOnly, q, serviceHealth)
+}
+
+func (h *Health) ServiceMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	return h.service(service, tags, passingOnly, q, serviceHealth)
+}
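Editor's note: a minimal sketch (not part of the patch) of querying healthy instances with the Service method above, assuming a local agent, the canonical github.com/hashicorp/consul/api import path, and a registered service named "web"; Node and AgentService are defined elsewhere in this package.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// passingOnly=true asks the server to return only instances whose
	// checks are all passing; "" means no tag filter.
	entries, _, err := client.Health().Service("web", "", true, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Printf("%s -> %s:%d (%s)\n",
			e.Node.Node, e.Service.Address, e.Service.Port,
			e.Checks.AggregatedStatus())
	}
}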
+// Connect is equivalent to Service except that it will only return services
+// which are Connect-enabled and will return the connection address for Connect
+// clients to use, which may be a proxy in front of the named service. If
+// passingOnly is true only instances where both the service and any proxy are
+// healthy will be returned.
+func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	var tags []string
+	if tag != "" {
+		tags = []string{tag}
+	}
+	return h.service(service, tags, passingOnly, q, connectHealth)
+}
+
+func (h *Health) ConnectMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	return h.service(service, tags, passingOnly, q, connectHealth)
+}
+
+// Ingress is equivalent to Connect except that it will only return associated
+// ingress gateways for the requested service.
+func (h *Health) Ingress(service string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	var tags []string
+	return h.service(service, tags, passingOnly, q, ingressHealth)
+}
+
+func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, healthType string) ([]*ServiceEntry, *QueryMeta, error) {
+	var path string
+	switch healthType {
+	case connectHealth:
+		path = "/v1/health/connect/" + service
+	case ingressHealth:
+		path = "/v1/health/ingress/" + service
+	default:
+		path = "/v1/health/service/" + service
+	}
+
+	r := h.c.newRequest("GET", path)
+	r.setQueryOptions(q)
+	if len(tags) > 0 {
+		for _, tag := range tags {
+			r.params.Add("tag", tag)
+		}
+	}
+	if passingOnly {
+		r.params.Set(HealthPassing, "1")
+	}
+	rtt, resp, err := h.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*ServiceEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// State is used to retrieve all the checks in a given state.
+// The wildcard "any" state can also be used for all checks.
+func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	switch state {
+	case HealthAny:
+	case HealthWarning:
+	case HealthCritical:
+	case HealthPassing:
+	default:
+		return nil, nil, fmt.Errorf("Unsupported state: %v", state)
+	}
+	r := h.c.newRequest("GET", "/v1/health/state/"+state)
+	r.setQueryOptions(q)
+	rtt, resp, err := h.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
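Editor's note: State pairs naturally with blocking queries. A hedged sketch (not part of the patch) of a watch loop over critical checks, feeding each response's LastIndex back as the next WaitIndex; assumes a local agent and the canonical import path.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	var lastIndex uint64
	for {
		// With WaitIndex set, the call blocks server-side until the index moves.
		checks, meta, err := client.Health().State(api.HealthCritical,
			&api.QueryOptions{WaitIndex: lastIndex})
		if err != nil {
			log.Fatal(err)
		}
		lastIndex = meta.LastIndex
		for _, c := range checks {
			fmt.Printf("%s/%s: %s\n", c.Node, c.CheckID, c.Status)
		}
	}
}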
diff --git a/vendor/github.com/hashicorp/consul/api/internal.go b/vendor/github.com/hashicorp/consul/api/internal.go
new file mode 100644
index 0000000000..b5f400f4b1
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/internal.go
@@ -0,0 +1,67 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import "context"
+
+// Internal can be used to query endpoints that are intended for
+// Hashicorp internal-use only.
+type Internal struct {
+	c *Client
+}
+
+// Internal returns a handle to endpoints that are for internal
+// Hashicorp usage only. There is no guarantee that these will
+// be backwards-compatible or supported, so usage of these is
+// not encouraged.
+func (c *Client) Internal() *Internal {
+	return &Internal{c}
+}
+
+type AssignServiceManualVIPsRequest struct {
+	Service    string
+	ManualVIPs []string
+}
+
+type AssignServiceManualVIPsResponse struct {
+	ServiceFound   bool `json:"Found"`
+	UnassignedFrom []PeeredServiceName
+}
+
+type PeeredServiceName struct {
+	ServiceName CompoundServiceName
+	Peer        string
+}
+
+func (i *Internal) AssignServiceVirtualIP(
+	ctx context.Context,
+	service string,
+	manualVIPs []string,
+	wo *WriteOptions,
+) (*AssignServiceManualVIPsResponse, *QueryMeta, error) {
+	req := i.c.newRequest("PUT", "/v1/internal/service-virtual-ip")
+	req.setWriteOptions(wo)
+	req.ctx = ctx
+	req.obj = AssignServiceManualVIPsRequest{
+		Service:    service,
+		ManualVIPs: manualVIPs,
+	}
+	rtt, resp, err := i.c.doRequest(req)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{RequestTime: rtt}
+	parseQueryMeta(resp, qm)
+
+	var out AssignServiceManualVIPsResponse
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return &out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go
new file mode 100644
index 0000000000..b9d330a6fd
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/kv.go
@@ -0,0 +1,307 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+// KVPair is used to represent a single K/V entry
+type KVPair struct {
+	// Key is the name of the key. It is also part of the URL path when accessed
+	// via the API.
+	Key string
+
+	// CreateIndex holds the index corresponding to the creation of this KVPair. This
+	// is a read-only field.
+	CreateIndex uint64
+
+	// ModifyIndex is used for the Check-And-Set operations and can also be fed
+	// back into the WaitIndex of the QueryOptions in order to perform blocking
+	// queries.
+	ModifyIndex uint64
+
+	// LockIndex holds the index corresponding to a lock on this key, if any. This
+	// is a read-only field.
+	LockIndex uint64
+
+	// Flags are any user-defined flags on the key. It is up to the implementer
+	// to check these values, since Consul does not treat them specially.
+	Flags uint64
+
+	// Value is the value for the key. This can be any value, but it will be
+	// base64 encoded upon transport.
+	Value []byte
+
+	// Session is a string representing the ID of the session. Any other
+	// interactions with this key over the same session must specify the same
+	// session ID.
+	Session string
+
+	// Namespace is the namespace the KVPair is associated with
+	// Namespacing is a Consul Enterprise feature.
+	Namespace string `json:",omitempty"`
+
+	// Partition is the partition the KVPair is associated with
+	// Admin Partition is a Consul Enterprise feature.
+	Partition string `json:",omitempty"`
+}
+
+// KVPairs is a list of KVPair objects
+type KVPairs []*KVPair
+
+// KV is used to manipulate the K/V API
+type KV struct {
+	c *Client
+}
+
+// KV is used to return a handle to the K/V APIs
+func (c *Client) KV() *KV {
+	return &KV{c}
+}
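Editor's note: a minimal sketch (not part of the patch) of the K/V handle above together with the Get, Put and CAS operations defined just below; the key name is made up, and a local agent plus the canonical import path are assumed.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	// Write, then read back. Get returns a nil pair for a missing key.
	if _, err := kv.Put(&api.KVPair{Key: "app/config/feature", Value: []byte("on")}, nil); err != nil {
		log.Fatal(err)
	}
	pair, _, err := kv.Get("app/config/feature", nil)
	if err != nil || pair == nil {
		log.Fatal("read failed or key missing")
	}

	// Check-and-set: succeeds only if the key is untouched since our read.
	ok, _, err := kv.CAS(&api.KVPair{
		Key:         pair.Key,
		Value:       []byte("off"),
		ModifyIndex: pair.ModifyIndex,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cas swapped:", ok)
}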
+// Get is used to look up a single key. The returned pointer
+// to the KVPair will be nil if the key does not exist.
+func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(key, nil, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer closeResponseBody(resp)
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// List is used to look up all keys under a prefix
+func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer closeResponseBody(resp)
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// Keys is used to list all the keys under a prefix. Optionally,
+// a separator can be used to limit the responses.
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
+	params := map[string]string{"keys": ""}
+	if separator != "" {
+		params["separator"] = separator
+	}
+	resp, qm, err := k.getInternal(prefix, params, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer closeResponseBody(resp)
+
+	var entries []string
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
+	r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/"))
+	r.setQueryOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	rtt, resp, err := k.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = requireHttpCodes(resp, 200, 404)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == 404 {
+		closeResponseBody(resp)
+		return nil, qm, nil
+	}
+
+	return resp, qm, nil
+}
+
+// Put is used to write a new value. Only the
+// Key, Flags and Value are respected.
+func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
+	params := make(map[string]string, 1)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	_, wm, err := k.put(p.Key, params, p.Value, q)
+	return wm, err
+}
+
+// CAS is used for a Check-And-Set operation. The Key,
+// ModifyIndex, Flags and Value are respected. Returns true
+// on success or false on failure.
+func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Acquire is used for a lock acquisition operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failure.
+func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["acquire"] = p.Session
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Release is used for a lock release operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failure.
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["release"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { + if len(key) > 0 && key[0] == '/' { + return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key) + } + + r := k.c.newRequest("PUT", "/v1/kv/"+key) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + r.body = bytes.NewReader(body) + r.header.Set("Content-Type", "application/octet-stream") + rtt, resp, err := k.c.doRequest(r) + if err != nil { + return false, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return false, nil, err + } + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + return res, qm, nil +} + +// Delete is used to delete a single key +func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(key, nil, w) + return qm, err +} + +// DeleteCAS is used for a Delete Check-And-Set operation. The Key +// and ModifyIndex are respected. Returns true on success or false on failures. +func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := map[string]string{ + "cas": strconv.FormatUint(p.ModifyIndex, 10), + } + return k.deleteInternal(p.Key, params, q) +} + +// DeleteTree is used to delete all keys under a prefix +func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) + return qm, err +} + +func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { + r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := k.c.doRequest(r) + if err != nil { + return false, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return false, nil, err + } + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + return res, qm, nil +} + +// The Txn function has been deprecated from the KV object; please see the Txn +// object for more information about Transactions. +func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) { + var ops TxnOps + for _, op := range txn { + ops = append(ops, &TxnOp{KV: op}) + } + + respOk, txnResp, qm, err := k.c.txn(ops, q) + if err != nil { + return false, nil, nil, err + } + + // Convert from the internal format. 
+	kvResp := KVTxnResponse{
+		Errors: txnResp.Errors,
+	}
+	for _, result := range txnResp.Results {
+		kvResp.Results = append(kvResp.Results, result.KV)
+	}
+	return respOk, &kvResp, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go
new file mode 100644
index 0000000000..e9529f7bde
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/lock.go
@@ -0,0 +1,411 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultLockSessionName is the Session Name we assign if none is provided
+	DefaultLockSessionName = "Consul API Lock"
+
+	// DefaultLockSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Lock. This is used because we do not have any
+	// other check to depend upon.
+	DefaultLockSessionTTL = "15s"
+
+	// DefaultLockWaitTime is how long we block for at a time to check if lock
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Lock acquisition.
+	DefaultLockWaitTime = 15 * time.Second
+
+	// DefaultLockRetryTime is how long we wait after a failed lock acquisition
+	// before attempting to do the lock again. This is so that once a lock-delay
+	// is in effect, we do not hot loop retrying the acquisition.
+	DefaultLockRetryTime = 5 * time.Second
+
+	// DefaultMonitorRetryTime is how long we wait after a failed monitor check
+	// of a lock (500 response code). This allows the monitor to ride out brief
+	// periods of unavailability, subject to the MonitorRetries setting in the
+	// lock options which is by default set to 0, disabling this feature. This
+	// affects locks and semaphores.
+	DefaultMonitorRetryTime = 2 * time.Second
+
+	// LockFlagValue is a magic flag we set to indicate a key
+	// is being used for a lock. It is used to detect a potential
+	// conflict with a semaphore.
+	LockFlagValue = 0x2ddccbc058a50c18
+)
+
+var (
+	// ErrLockHeld is returned if we attempt to double lock
+	ErrLockHeld = fmt.Errorf("Lock already held")
+
+	// ErrLockNotHeld is returned if we attempt to unlock a lock
+	// that we do not hold.
+	ErrLockNotHeld = fmt.Errorf("Lock not held")
+
+	// ErrLockInUse is returned if we attempt to destroy a lock
+	// that is in use.
+	ErrLockInUse = fmt.Errorf("Lock in use")
+
+	// ErrLockConflict is returned if the flags on a key
+	// used for a lock do not match expectation
+	ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
+)
+
+// Lock is used to implement client-side leader election. It follows the
+// algorithm described here: https://www.consul.io/docs/guides/leader-election.html.
+type Lock struct {
+	c    *Client
+	opts *LockOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// LockOptions is used to parameterize the Lock behavior.
+type LockOptions struct { + Key string // Must be set and have write permissions + Value []byte // Optional, value to associate with the lock + Session string // Optional, created if not specified + SessionOpts *SessionEntry // Optional, options to use when creating a session + SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) + SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) + MonitorRetries int // Optional, defaults to 0 which means no retries + MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime + LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime + LockTryOnce bool // Optional, defaults to false which means try forever + LockDelay time.Duration // Optional, defaults to 15s + Namespace string `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or "default" namespace +} + +// LockKey returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockKey(key string) (*Lock, error) { + opts := &LockOptions{ + Key: key, + } + return c.LockOpts(opts) +} + +// LockOpts returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { + if opts.Key == "" { + return nil, fmt.Errorf("missing key") + } + if opts.SessionName == "" { + opts.SessionName = DefaultLockSessionName + } + if opts.SessionTTL == "" { + opts.SessionTTL = DefaultLockSessionTTL + } else { + if _, err := time.ParseDuration(opts.SessionTTL); err != nil { + return nil, fmt.Errorf("invalid SessionTTL: %v", err) + } + } + if opts.MonitorRetryTime == 0 { + opts.MonitorRetryTime = DefaultMonitorRetryTime + } + if opts.LockWaitTime == 0 { + opts.LockWaitTime = DefaultLockWaitTime + } + l := &Lock{ + c: c, + opts: opts, + } + return l, nil +} + +// Lock attempts to acquire the lock and blocks while doing so. +// Providing a non-nil stopCh can be used to abort the lock attempt. +// Returns a channel that is closed if our lock is lost or an error. +// This channel could be closed at any time due to session invalidation, +// communication errors, operator intervention, etc. It is NOT safe to +// assume that the lock is held until Unlock() unless the Session is specifically +// created without any associated health checks. By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the lock being lost. 
+func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+	// Hold the lock as we try to acquire
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Check if we already hold the lock
+	if l.isHeld {
+		return nil, ErrLockHeld
+	}
+
+	wOpts := WriteOptions{
+		Namespace: l.opts.Namespace,
+	}
+
+	// Check if we need to create a session first
+	l.lockSession = l.opts.Session
+	if l.lockSession == "" {
+		s, err := l.createSession()
+		if err != nil {
+			return nil, fmt.Errorf("failed to create session: %v", err)
+		}
+
+		l.sessionRenew = make(chan struct{})
+		l.lockSession = s
+
+		session := l.c.Session()
+		go session.RenewPeriodic(l.opts.SessionTTL, s, &wOpts, l.sessionRenew)
+
+		// If we fail to acquire the lock, cleanup the session
+		defer func() {
+			if !l.isHeld {
+				close(l.sessionRenew)
+				l.sessionRenew = nil
+			}
+		}()
+	}
+
+	// Setup the query options
+	kv := l.c.KV()
+	qOpts := QueryOptions{
+		WaitTime:  l.opts.LockWaitTime,
+		Namespace: l.opts.Namespace,
+	}
+
+	start := time.Now()
+	attempts := 0
+WAIT:
+	// Check if we should quit
+	select {
+	case <-stopCh:
+		return nil, nil
+	default:
+	}
+
+	// Handle the one-shot mode.
+	if l.opts.LockTryOnce && attempts > 0 {
+		elapsed := time.Since(start)
+		if elapsed > l.opts.LockWaitTime {
+			return nil, nil
+		}
+
+		// Query wait time should not exceed the lock wait time
+		qOpts.WaitTime = l.opts.LockWaitTime - elapsed
+	}
+	attempts++
+
+	// Look for an existing lock, blocking until not taken
+	pair, meta, err := kv.Get(l.opts.Key, &qOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read lock: %v", err)
+	}
+	if pair != nil && pair.Flags != LockFlagValue {
+		return nil, ErrLockConflict
+	}
+	locked := false
+	if pair != nil && pair.Session == l.lockSession {
+		goto HELD
+	}
+	if pair != nil && pair.Session != "" {
+		qOpts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+
+	// Try to acquire the lock
+	pair = l.lockEntry(l.lockSession)
+
+	locked, _, err = kv.Acquire(pair, &wOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to acquire lock: %v", err)
+	}
+
+	// Handle the case of not getting the lock
+	if !locked {
+		// Determine why the lock failed
+		qOpts.WaitIndex = 0
+		pair, meta, err = kv.Get(l.opts.Key, &qOpts)
+		if err != nil {
+			return nil, err
+		}
+		if pair != nil && pair.Session != "" {
+			// If the session is not null, this means that a wait can safely happen
+			// using a long poll
+			qOpts.WaitIndex = meta.LastIndex
+			goto WAIT
+		} else {
+			// If the session is empty and the lock failed to acquire, then it means
+			// a lock-delay is in effect and a timed wait must be used
+			select {
+			case <-time.After(DefaultLockRetryTime):
+				goto WAIT
+			case <-stopCh:
+				return nil, nil
+			}
+		}
+	}
+
+HELD:
+	// Watch to ensure we maintain leadership
+	leaderCh := make(chan struct{})
+	go l.monitorLock(l.lockSession, leaderCh)
+
+	// Set that we own the lock
+	l.isHeld = true
+
+	// Locked! All done
+	return leaderCh, nil
+}
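Editor's note: a hedged sketch (not part of the patch) of the leader-election flow above: acquire via LockKey/Lock, treat a closed leader channel as lost leadership, and release with Unlock (defined just below). Assumes a local agent and the canonical import path; the key name is made up.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	lock, err := client.LockKey("service/my-app/leader")
	if err != nil {
		log.Fatal(err)
	}

	stopCh := make(chan struct{}) // close this to abort the acquisition
	lostCh, err := lock.Lock(stopCh)
	if err != nil {
		log.Fatal(err)
	}
	if lostCh == nil {
		log.Fatal("acquisition aborted or timed out")
	}
	defer lock.Unlock()

	fmt.Println("acquired leadership; working until the lock is lost")
	<-lostCh // closed on session invalidation, agent trouble, etc.
	fmt.Println("leadership lost")
}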
+// Unlock releases the lock. It is an error to call this
+// if the lock is not currently held.
+func (l *Lock) Unlock() error {
+	// Hold the lock as we try to release
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Ensure the lock is actually held
+	if !l.isHeld {
+		return ErrLockNotHeld
+	}
+
+	// Set that we no longer own the lock
+	l.isHeld = false
+
+	// Stop the session renew
+	if l.sessionRenew != nil {
+		defer func() {
+			close(l.sessionRenew)
+			l.sessionRenew = nil
+		}()
+	}
+
+	// Get the lock entry, and clear the lock session
+	lockEnt := l.lockEntry(l.lockSession)
+	l.lockSession = ""
+
+	// Release the lock explicitly
+	kv := l.c.KV()
+	w := WriteOptions{Namespace: l.opts.Namespace}
+
+	_, _, err := kv.Release(lockEnt, &w)
+	if err != nil {
+		return fmt.Errorf("failed to release lock: %v", err)
+	}
+	return nil
+}
+
+// Destroy is used to clean up the lock entry. It is not necessary
+// to invoke it. It will fail if the lock is in use.
+func (l *Lock) Destroy() error {
+	// Hold the lock as we try to release
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Check if we already hold the lock
+	if l.isHeld {
+		return ErrLockHeld
+	}
+
+	// Look for an existing lock
+	kv := l.c.KV()
+	q := QueryOptions{Namespace: l.opts.Namespace}
+
+	pair, _, err := kv.Get(l.opts.Key, &q)
+	if err != nil {
+		return fmt.Errorf("failed to read lock: %v", err)
+	}
+
+	// Nothing to do if the lock does not exist
+	if pair == nil {
+		return nil
+	}
+
+	// Check for possible flag conflict
+	if pair.Flags != LockFlagValue {
+		return ErrLockConflict
+	}
+
+	// Check if it is in use
+	if pair.Session != "" {
+		return ErrLockInUse
+	}
+
+	// Attempt the delete
+	w := WriteOptions{Namespace: l.opts.Namespace}
+	didRemove, _, err := kv.DeleteCAS(pair, &w)
+	if err != nil {
+		return fmt.Errorf("failed to remove lock: %v", err)
+	}
+	if !didRemove {
+		return ErrLockInUse
+	}
+	return nil
+}
+
+// createSession is used to create a new managed session
+func (l *Lock) createSession() (string, error) {
+	session := l.c.Session()
+	se := l.opts.SessionOpts
+	if se == nil {
+		se = &SessionEntry{
+			Name:      l.opts.SessionName,
+			TTL:       l.opts.SessionTTL,
+			LockDelay: l.opts.LockDelay,
+		}
+	}
+	w := WriteOptions{Namespace: l.opts.Namespace}
+	id, _, err := session.Create(se, &w)
+	if err != nil {
+		return "", err
+	}
+	return id, nil
+}
+
+// lockEntry returns a formatted KVPair for the lock
+func (l *Lock) lockEntry(session string) *KVPair {
+	return &KVPair{
+		Key:     l.opts.Key,
+		Value:   l.opts.Value,
+		Session: session,
+		Flags:   LockFlagValue,
+	}
+}
+
+// monitorLock is a long-running routine to monitor lock ownership.
+// It closes the stopCh if we lose our leadership.
+func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
+	defer close(stopCh)
+	kv := l.c.KV()
+	opts := QueryOptions{
+		RequireConsistent: true,
+		Namespace:         l.opts.Namespace,
+	}
+WAIT:
+	retries := l.opts.MonitorRetries
+RETRY:
+	pair, meta, err := kv.Get(l.opts.Key, &opts)
+	if err != nil {
+		// If configured we can try to ride out a brief Consul unavailability
+		// by doing retries. Note that we have to attempt the retry in a non-
+		// blocking fashion so that we have a clean place to reset the retry
+		// counter if service is restored.
+ if retries > 0 && IsRetryableError(err) { + time.Sleep(l.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + if pair != nil && pair.Session == session { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/namespace.go b/vendor/github.com/hashicorp/consul/api/namespace.go new file mode 100644 index 0000000000..98afd22998 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/namespace.go @@ -0,0 +1,227 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "encoding/json" + "fmt" + "time" +) + +// Namespace is the configuration of a single namespace. Namespacing is a Consul Enterprise feature. +type Namespace struct { + // Name is the name of the Namespace. It must be unique and + // must be a DNS hostname. There are also other reserved names + // that may not be used. + Name string `json:"Name"` + + // Description is where the user puts any information they want + // about the namespace. It is not used internally. + Description string `json:"Description,omitempty"` + + // ACLs is the configuration of ACLs for this namespace. It has its + // own struct so that we can add more to it in the future. + // This is nullable so that we can omit if empty when encoding in JSON + ACLs *NamespaceACLConfig `json:"ACLs,omitempty"` + + // Meta is a map that can be used to add kv metadata to the namespace definition + Meta map[string]string `json:"Meta,omitempty"` + + // DeletedAt is the time when the Namespace was marked for deletion + // This is nullable so that we can omit if empty when encoding in JSON + DeletedAt *time.Time `json:"DeletedAt,omitempty" alias:"deleted_at"` + + // Partition which contains the Namespace. + Partition string `json:"Partition,omitempty"` + + // CreateIndex is the Raft index at which the Namespace was created + CreateIndex uint64 `json:"CreateIndex,omitempty"` + + // ModifyIndex is the latest Raft index at which the Namespace was modified. + ModifyIndex uint64 `json:"ModifyIndex,omitempty"` +} + +func (n *Namespace) UnmarshalJSON(data []byte) error { + type Alias Namespace + aux := struct { + DeletedAtSnake *time.Time `json:"deleted_at"` + *Alias + }{ + Alias: (*Alias)(n), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + if n.DeletedAt == nil && aux.DeletedAtSnake != nil { + n.DeletedAt = aux.DeletedAtSnake + } + + return nil +} + +// NamespaceACLConfig is the Namespace specific ACL configuration container +type NamespaceACLConfig struct { + // PolicyDefaults is the list of policies that should be used for the parent authorizer + // of all tokens in the associated namespace. + PolicyDefaults []ACLLink `json:"PolicyDefaults" alias:"policy_defaults"` + // RoleDefaults is the list of roles that should be used for the parent authorizer + // of all tokens in the associated namespace. 
+	RoleDefaults []ACLLink `json:"RoleDefaults" alias:"role_defaults"`
+}
+
+func (n *NamespaceACLConfig) UnmarshalJSON(data []byte) error {
+	type Alias NamespaceACLConfig
+	aux := struct {
+		PolicyDefaultsSnake []ACLLink `json:"policy_defaults"`
+		RoleDefaultsSnake   []ACLLink `json:"role_defaults"`
+		*Alias
+	}{
+		Alias: (*Alias)(n),
+	}
+	if err := json.Unmarshal(data, &aux); err != nil {
+		return err
+	}
+
+	if n.PolicyDefaults == nil {
+		for _, pd := range aux.PolicyDefaultsSnake {
+			n.PolicyDefaults = append(n.PolicyDefaults, pd)
+		}
+	}
+	if n.RoleDefaults == nil {
+		for _, pd := range aux.RoleDefaultsSnake {
+			n.RoleDefaults = append(n.RoleDefaults, pd)
+		}
+	}
+	return nil
+}
+
+// Namespaces can be used to manage Namespaces in Consul Enterprise.
+type Namespaces struct {
+	c *Client
+}
+
+// Namespaces returns a handle to the namespaces endpoints.
+func (c *Client) Namespaces() *Namespaces {
+	return &Namespaces{c}
+}
+
+func (n *Namespaces) Create(ns *Namespace, q *WriteOptions) (*Namespace, *WriteMeta, error) {
+	if ns.Name == "" {
+		return nil, nil, fmt.Errorf("Must specify a Name for Namespace creation")
+	}
+
+	r := n.c.newRequest("PUT", "/v1/namespace")
+	r.setWriteOptions(q)
+	r.obj = ns
+	rtt, resp, err := n.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out Namespace
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+func (n *Namespaces) Update(ns *Namespace, q *WriteOptions) (*Namespace, *WriteMeta, error) {
+	if ns.Name == "" {
+		return nil, nil, fmt.Errorf("Must specify a Name for Namespace updating")
+	}
+
+	r := n.c.newRequest("PUT", "/v1/namespace/"+ns.Name)
+	r.setWriteOptions(q)
+	r.obj = ns
+	rtt, resp, err := n.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out Namespace
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+func (n *Namespaces) Read(name string, q *QueryOptions) (*Namespace, *QueryMeta, error) {
+	var out Namespace
+	r := n.c.newRequest("GET", "/v1/namespace/"+name)
+	r.setQueryOptions(q)
+	rtt, resp, err := n.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	found, resp, err := requireNotFoundOrOK(resp)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if !found {
+		return nil, qm, nil
+	}
+
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return &out, qm, nil
+}
+
+func (n *Namespaces) Delete(name string, q *WriteOptions) (*WriteMeta, error) {
+	r := n.c.newRequest("DELETE", "/v1/namespace/"+name)
+	r.setWriteOptions(q)
+	rtt, resp, err := n.c.doRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, err
+	}
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
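Editor's note: a minimal sketch (not part of the patch) of the namespace endpoints above; Namespaces are Enterprise-only, so against OSS Consul these calls return an error. The "team-a" name is made up; a local agent and the canonical import path are assumed. List is defined immediately below.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	created, _, err := client.Namespaces().Create(&api.Namespace{
		Name:        "team-a",
		Description: "services owned by team A",
	}, nil)
	if err != nil {
		log.Fatal(err) // expected on non-Enterprise servers
	}
	fmt.Println("created at raft index:", created.CreateIndex)

	all, _, err := client.Namespaces().List(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, ns := range all {
		fmt.Println(ns.Name)
	}
}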
+func (n *Namespaces) List(q *QueryOptions) ([]*Namespace, *QueryMeta, error) {
+	var out []*Namespace
+	r := n.c.newRequest("GET", "/v1/namespaces")
+	r.setQueryOptions(q)
+	rtt, resp, err := n.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go
new file mode 100644
index 0000000000..667dcd8723
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator.go
@@ -0,0 +1,14 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+// Operator can be used to perform low-level operator tasks for Consul.
+type Operator struct {
+	c *Client
+}
+
+// Operator returns a handle to the operator endpoints.
+func (c *Client) Operator() *Operator {
+	return &Operator{c}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go
new file mode 100644
index 0000000000..9228d89b47
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_area.go
@@ -0,0 +1,209 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+// The /v1/operator/area endpoints are available only in Consul Enterprise and
+// interact with its network area subsystem. Network areas are used to link
+// together Consul servers in different Consul datacenters. With network areas,
+// Consul datacenters can be linked together in ways other than a fully-connected
+// mesh, as is required for Consul's WAN.
+
+import (
+	"net"
+	"time"
+)
+
+// Area defines a network area.
+type Area struct {
+	// ID is the identifier for an area (a UUID). This must be left empty
+	// when creating a new area.
+	ID string
+
+	// PeerDatacenter is the peer Consul datacenter that will make up the
+	// other side of this network area. Network areas always involve a pair
+	// of datacenters: the datacenter where the area was created, and the
+	// peer datacenter. This is required.
+	PeerDatacenter string
+
+	// RetryJoin specifies the addresses of Consul servers to join, such as
+	// IPs or hostnames, with an optional port number. This is optional.
+	RetryJoin []string
+
+	// UseTLS specifies whether gossip over this area should be encrypted with TLS
+	// if possible.
+	UseTLS bool
+}
+
+// AreaJoinResponse is returned when a join occurs and gives the result for each
+// address.
+type AreaJoinResponse struct {
+	// The address that was joined.
+	Address string
+
+	// Whether or not the join was a success.
+	Joined bool
+
+	// If we couldn't join, this is the message with information.
+	Error string
+}
+
+// SerfMember is a generic structure for reporting information about members in
+// a Serf cluster. This is only used by the area endpoints right now, but this
+// could be expanded to other endpoints in the future.
+type SerfMember struct {
+	// ID is the node identifier (a UUID).
+	ID string
+
+	// Name is the node name.
+	Name string
+
+	// Addr has the IP address.
+	Addr net.IP
+
+	// Port is the RPC port.
+	Port uint16
+
+	// Datacenter is the DC name.
+	Datacenter string
+
+	// Role is "client", "server", or "unknown".
+	Role string
+
+	// Build has the version of the Consul agent.
+	Build string
+
+	// Protocol is the protocol of the Consul agent.
+	Protocol int
+
+	// Status is the Serf health status "none", "alive", "leaving", "left",
+	// or "failed".
+	Status string
+
+	// RTT is the estimated round trip time from the server handling the
+	// request to this member.
This will be negative if no RTT estimate + // is available. + RTT time.Duration +} + +// AreaCreate will create a new network area. The ID in the given structure must +// be empty and a generated ID will be returned on success. +func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) { + r := op.c.newRequest("POST", "/v1/operator/area") + r.setWriteOptions(q) + r.obj = area + rtt, resp, err := op.c.doRequest(r) + if err != nil { + return "", nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return "", nil, err + } + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// AreaUpdate will update the configuration of the network area with the given ID. +func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) { + r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID) + r.setWriteOptions(q) + r.obj = area + rtt, resp, err := op.c.doRequest(r) + if err != nil { + return "", nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return "", nil, err + } + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// AreaGet returns a single network area. +func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaList returns all the available network areas. +func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaDelete deletes the given network area. +func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) { + r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID) + r.setWriteOptions(q) + rtt, resp, err := op.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// AreaJoin attempts to join the given set of join addresses to the given +// network area. See the Area structure for details about join addresses. +func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) { + r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join") + r.setWriteOptions(q) + r.obj = addresses + rtt, resp, err := op.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out []*AreaJoinResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, wm, nil +} + +// AreaMembers lists the Serf information about the members in the given area. 
+func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) {
+	var out []*SerfMember
+	qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_audit.go b/vendor/github.com/hashicorp/consul/api/operator_audit.go
new file mode 100644
index 0000000000..5240d38a70
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_audit.go
@@ -0,0 +1,40 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// The /v1/operator/audit-hash endpoint is available only in Consul Enterprise and
+// interacts with its audit logging subsystem.
+
+package api
+
+type AuditHashRequest struct {
+	Input string
+}
+
+type AuditHashResponse struct {
+	Hash string
+}
+
+func (op *Operator) AuditHash(a *AuditHashRequest, q *QueryOptions) (*AuditHashResponse, error) {
+	r := op.c.newRequest("POST", "/v1/operator/audit-hash")
+	r.setQueryOptions(q)
+	r.obj = a
+
+	rtt, resp, err := op.c.doRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, err
+	}
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out AuditHashResponse
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+
+	return &out, nil
+}
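Editor's note: a hedged sketch (not part of the patch) of the Enterprise-only audit-hash call above; the input value is made up, and exactly how the server derives the hash is left to the audit logging subsystem. Assumes a local Enterprise agent and the canonical import path.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Ask the agent to hash a value so it can be matched against the
	// hashed fields that appear in audit log entries.
	out, err := client.Operator().AuditHash(&api.AuditHashRequest{Input: "203.0.113.7"}, nil)
	if err != nil {
		log.Fatal(err) // Enterprise-only endpoint
	}
	fmt.Println("hash:", out.Hash)
}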
+	DisableUpgradeMigration bool
+
+	// (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when
+	// performing upgrade migrations. If left blank, the Consul version will be used.
+	UpgradeVersionTag string
+
+	// CreateIndex holds the index corresponding to the creation of this configuration.
+	// This is a read-only field.
+	CreateIndex uint64
+
+	// ModifyIndex will be set to the index of the last update when retrieving the
+	// Autopilot configuration. Resubmitting a configuration with
+	// AutopilotCASConfiguration will perform a check-and-set operation which ensures
+	// there hasn't been a subsequent update since the configuration was retrieved.
+	ModifyIndex uint64
+}
+
+// NewAutopilotConfiguration returns default values for the AutopilotConfiguration
+// type, consistent with
+// https://www.consul.io/api-docs/operator/autopilot#parameters-1
+func NewAutopilotConfiguration() AutopilotConfiguration {
+	cfg := AutopilotConfiguration{
+		CleanupDeadServers:      true,
+		LastContactThreshold:    NewReadableDuration(200 * time.Millisecond),
+		MaxTrailingLogs:         250,
+		MinQuorum:               0,
+		ServerStabilizationTime: NewReadableDuration(10 * time.Second),
+		RedundancyZoneTag:       "",
+		DisableUpgradeMigration: false,
+		UpgradeVersionTag:       "",
+	}
+
+	return cfg
+}
+
+// ServerHealth is the health (from the leader's point of view) of a server.
+type ServerHealth struct {
+	// ID is the raft ID of the server.
+	ID string
+
+	// Name is the node name of the server.
+	Name string
+
+	// Address is the address of the server.
+	Address string
+
+	// The status of the SerfHealth check for the server.
+	SerfStatus string
+
+	// Version is the Consul version of the server.
+	Version string
+
+	// Leader is whether this server is currently the leader.
+	Leader bool
+
+	// LastContact is the time since this node's last contact with the leader.
+	LastContact *ReadableDuration
+
+	// LastTerm is the highest leader term this server has a record of in its Raft log.
+	LastTerm uint64
+
+	// LastIndex is the last log index this server has a record of in its Raft log.
+	LastIndex uint64
+
+	// Healthy is whether or not the server is healthy according to the current
+	// Autopilot config.
+	Healthy bool
+
+	// Voter is whether this is a voting server.
+	Voter bool
+
+	// StableSince is the last time this server's Healthy value changed.
+	StableSince time.Time
+}
+
+// OperatorHealthReply is a representation of the overall health of the cluster
+type OperatorHealthReply struct {
+	// Healthy is true if all the servers in the cluster are healthy.
+	Healthy bool
+
+	// FailureTolerance is the number of healthy servers that could be lost without
+	// an outage occurring.
+	FailureTolerance int
+
+	// Servers holds the health of each server.
+ Servers []ServerHealth +} + +type AutopilotState struct { + Healthy bool + FailureTolerance int + OptimisticFailureTolerance int + + Servers map[string]AutopilotServer + Leader string + Voters []string + ReadReplicas []string `json:",omitempty"` + RedundancyZones map[string]AutopilotZone `json:",omitempty"` + Upgrade *AutopilotUpgrade `json:",omitempty"` +} + +type AutopilotServer struct { + ID string + Name string + Address string + NodeStatus string + Version string + LastContact *ReadableDuration + LastTerm uint64 + LastIndex uint64 + Healthy bool + StableSince time.Time + RedundancyZone string `json:",omitempty"` + UpgradeVersion string `json:",omitempty"` + ReadReplica bool + Status AutopilotServerStatus + Meta map[string]string + NodeType AutopilotServerType +} + +type AutopilotServerStatus string + +const ( + AutopilotServerNone AutopilotServerStatus = "none" + AutopilotServerLeader AutopilotServerStatus = "leader" + AutopilotServerVoter AutopilotServerStatus = "voter" + AutopilotServerNonVoter AutopilotServerStatus = "non-voter" + AutopilotServerStaging AutopilotServerStatus = "staging" +) + +type AutopilotServerType string + +const ( + AutopilotTypeVoter AutopilotServerType = "voter" + AutopilotTypeReadReplica AutopilotServerType = "read-replica" + AutopilotTypeZoneVoter AutopilotServerType = "zone-voter" + AutopilotTypeZoneExtraVoter AutopilotServerType = "zone-extra-voter" + AutopilotTypeZoneStandby AutopilotServerType = "zone-standby" +) + +type AutopilotZone struct { + Servers []string + Voters []string + FailureTolerance int +} + +type AutopilotZoneUpgradeVersions struct { + TargetVersionVoters []string `json:",omitempty"` + TargetVersionNonVoters []string `json:",omitempty"` + OtherVersionVoters []string `json:",omitempty"` + OtherVersionNonVoters []string `json:",omitempty"` +} + +type AutopilotUpgrade struct { + Status AutopilotUpgradeStatus + TargetVersion string `json:",omitempty"` + TargetVersionVoters []string `json:",omitempty"` + TargetVersionNonVoters []string `json:",omitempty"` + TargetVersionReadReplicas []string `json:",omitempty"` + OtherVersionVoters []string `json:",omitempty"` + OtherVersionNonVoters []string `json:",omitempty"` + OtherVersionReadReplicas []string `json:",omitempty"` + RedundancyZones map[string]AutopilotZoneUpgradeVersions `json:",omitempty"` +} + +type AutopilotUpgradeStatus string + +const ( + // AutopilotUpgradeIdle is the status when no upgrade is in progress. + AutopilotUpgradeIdle AutopilotUpgradeStatus = "idle" + + // AutopilotUpgradeAwaitNewVoters is the status when more servers of + // the target version must be added in order to start the promotion + // phase of the upgrade + AutopilotUpgradeAwaitNewVoters AutopilotUpgradeStatus = "await-new-voters" + + // AutopilotUpgradePromoting is the status when autopilot is promoting + // servers of the target version. + AutopilotUpgradePromoting AutopilotUpgradeStatus = "promoting" + + // AutopilotUpgradeDemoting is the status when autopilot is demoting + // servers not on the target version + AutopilotUpgradeDemoting AutopilotUpgradeStatus = "demoting" + + // AutopilotUpgradeLeaderTransfer is the status when autopilot is transferring + // leadership from a server running an older version to a server + // using the target version. 
+	AutopilotUpgradeLeaderTransfer AutopilotUpgradeStatus = "leader-transfer"
+
+	// AutopilotUpgradeAwaitNewServers is the status when autopilot has finished
+	// transferring leadership and has demoted all the other versioned
+	// servers but wants to indicate that more target version servers
+	// are needed to replace all the existing other version servers.
+	AutopilotUpgradeAwaitNewServers AutopilotUpgradeStatus = "await-new-servers"
+
+	// AutopilotUpgradeAwaitServerRemoval is the status when autopilot is waiting
+	// for the servers on non-target versions to be removed
+	AutopilotUpgradeAwaitServerRemoval AutopilotUpgradeStatus = "await-server-removal"
+
+	// AutopilotUpgradeDisabled is the status when automated upgrades are
+	// disabled in the autopilot configuration
+	AutopilotUpgradeDisabled AutopilotUpgradeStatus = "disabled"
+)
+
+// ReadableDuration is a duration type that is serialized to JSON in human readable format.
+type ReadableDuration time.Duration
+
+func NewReadableDuration(dur time.Duration) *ReadableDuration {
+	d := ReadableDuration(dur)
+	return &d
+}
+
+func (d *ReadableDuration) String() string {
+	return d.Duration().String()
+}
+
+func (d *ReadableDuration) Duration() time.Duration {
+	if d == nil {
+		return time.Duration(0)
+	}
+	return time.Duration(*d)
+}
+
+func (d *ReadableDuration) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil
+}
+
+func (d *ReadableDuration) UnmarshalJSON(raw []byte) (err error) {
+	if d == nil {
+		return fmt.Errorf("cannot unmarshal to nil pointer")
+	}
+
+	var dur time.Duration
+	str := string(raw)
+	if len(str) >= 2 && str[0] == '"' && str[len(str)-1] == '"' {
+		// quoted string
+		dur, err = time.ParseDuration(str[1 : len(str)-1])
+		if err != nil {
+			return err
+		}
+	} else {
+		// no quotes, not a string
+		v, err := strconv.ParseFloat(str, 64)
+		if err != nil {
+			return err
+		}
+		dur = time.Duration(v)
+	}
+
+	*d = ReadableDuration(dur)
+	return nil
+}
+
+// AutopilotGetConfiguration is used to query the current Autopilot configuration.
+func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) {
+	r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration")
+	r.setQueryOptions(q)
+	_, resp, err := op.c.doRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, err
+	}
+
+	var out AutopilotConfiguration
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+
+	return &out, nil
+}
+
+// AutopilotSetConfiguration is used to set the current Autopilot configuration.
+func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error {
+	r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
+	r.setWriteOptions(q)
+	r.obj = conf
+	_, resp, err := op.c.doRequest(r)
+	if err != nil {
+		return err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return err
+	}
+	return nil
+}
+
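Because ModifyIndex rides along with the configuration, a safe read-modify-write can go through the check-and-set helper defined just below. A hedged sketch, reusing the client and op handles from the earlier sketch and assumed to run inside a function that returns an error:

    // Read the current configuration; this captures ModifyIndex.
    conf, err := op.AutopilotGetConfiguration(nil)
    if err != nil {
        return err
    }
    conf.CleanupDeadServers = false

    // Write it back only if nobody else modified it in between.
    ok, err := op.AutopilotCASConfiguration(conf, nil)
    if err != nil {
        return err
    }
    if !ok {
        fmt.Println("lost the race with a concurrent writer; re-read and retry")
    }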
+// AutopilotCASConfiguration is used to perform a Check-And-Set update on the
+// Autopilot configuration. The ModifyIndex value will be respected. Returns
+// true on success or false on failures.
+func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) {
+	r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
+	r.setWriteOptions(q)
+	r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10))
+	r.obj = conf
+	_, resp, err := op.c.doRequest(r)
+	if err != nil {
+		return false, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return false, err
+	}
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+
+	return res, nil
+}
+
+// AutopilotServerHealth is used to query the health of the servers in the
+// cluster, as seen by the leader.
+func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
+	r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
+	r.setQueryOptions(q)
+
+	// we use 429 status to indicate unhealthiness
+	_, resp, err := op.c.doRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer closeResponseBody(resp)
+	err = requireHttpCodes(resp, 200, 429)
+	if err != nil {
+		return nil, err
+	}
+
+	var out OperatorHealthReply
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+func (op *Operator) AutopilotState(q *QueryOptions) (*AutopilotState, error) {
+	r := op.c.newRequest("GET", "/v1/operator/autopilot/state")
+	r.setQueryOptions(q)
+	_, resp, err := op.c.doRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, err
+	}
+
+	var out AutopilotState
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+
+	return &out, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
new file mode 100644
index 0000000000..aefec9e270
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
@@ -0,0 +1,110 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+// keyringRequest is used for performing Keyring operations
+type keyringRequest struct {
+	Key string
+}
+
+// KeyringResponse is returned when listing the gossip encryption keys
+type KeyringResponse struct {
+	// Whether this response is for a WAN ring
+	WAN bool
+
+	// The datacenter name this request corresponds to
+	Datacenter string
+
+	// Segment has the network segment this request corresponds to.
+	Segment string
+
+	// Partition has the admin partition this request corresponds to.
+ Partition string `json:",omitempty"` + + // Messages has information or errors from serf + Messages map[string]string `json:",omitempty"` + + // A map of the encryption keys to the number of nodes they're installed on + Keys map[string]int + + // A map of the encryption primary keys to the number of nodes they're installed on + PrimaryKeys map[string]int + + // The total number of nodes in this ring + NumNodes int +} + +// KeyringInstall is used to install a new gossip encryption key into the cluster +func (op *Operator) KeyringInstall(key string, q *WriteOptions) error { + r := op.c.newRequest("POST", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := op.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} + +// KeyringList is used to list the gossip keys installed in the cluster +func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) { + r := op.c.newRequest("GET", "/v1/operator/keyring") + r.setQueryOptions(q) + _, resp, err := op.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + var out []*KeyringResponse + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// KeyringRemove is used to remove a gossip encryption key from the cluster +func (op *Operator) KeyringRemove(key string, q *WriteOptions) error { + r := op.c.newRequest("DELETE", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := op.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} + +// KeyringUse is used to change the active gossip encryption key +func (op *Operator) KeyringUse(key string, q *WriteOptions) error { + r := op.c.newRequest("PUT", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := op.c.doRequest(r) + if err != nil { + return err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_license.go b/vendor/github.com/hashicorp/consul/api/operator_license.go new file mode 100644 index 0000000000..1e3496da0e --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_license.go @@ -0,0 +1,134 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "io" + "strings" + "time" +) + +type License struct { + // The unique identifier of the license + LicenseID string `json:"license_id"` + + // The customer ID associated with the license + CustomerID string `json:"customer_id"` + + // If set, an identifier that should be used to lock the license to a + // particular site, cluster, etc. 
+ InstallationID string `json:"installation_id"` + + // The time at which the license was issued + IssueTime time.Time `json:"issue_time"` + + // The time at which the license starts being valid + StartTime time.Time `json:"start_time"` + + // The time after which the license expires + ExpirationTime time.Time `json:"expiration_time"` + + // The time at which the license ceases to function and can + // no longer be used in any capacity + TerminationTime time.Time `json:"termination_time"` + + // Whether the license will ignore termination + IgnoreTermination bool `json:"ignore_termination"` + + // The product the license is valid for + Product string `json:"product"` + + // License Specific Flags + Flags map[string]interface{} `json:"flags"` + + // Modules is a list of the licensed enterprise modules + Modules []string `json:"modules"` + + // List of features enabled by the license + Features []string `json:"features"` +} + +type LicenseReply struct { + Valid bool + License *License + Warnings []string +} + +func (op *Operator) LicenseGet(q *QueryOptions) (*LicenseReply, error) { + var reply LicenseReply + if _, err := op.c.query("/v1/operator/license", &reply, q); err != nil { + return nil, err + } else { + return &reply, nil + } +} + +func (op *Operator) LicenseGetSigned(q *QueryOptions) (string, error) { + r := op.c.newRequest("GET", "/v1/operator/license") + r.params.Set("signed", "1") + r.setQueryOptions(q) + _, resp, err := op.c.doRequest(r) + if err != nil { + return "", err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return "", err + } + + data, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + + return string(data), nil +} + +// LicenseReset will reset the license to the builtin one if it is still valid. +// If the builtin license is invalid, the current license stays active. 
+//
+// DEPRECATED: Consul 1.10 removes the corresponding HTTP endpoint as licenses
+// are now set via agent configuration instead of through the API
+func (op *Operator) LicenseReset(opts *WriteOptions) (*LicenseReply, error) {
+	var reply LicenseReply
+	r := op.c.newRequest("DELETE", "/v1/operator/license")
+	r.setWriteOptions(opts)
+	_, resp, err := op.c.doRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, err
+	}
+	if err := decodeBody(resp, &reply); err != nil {
+		return nil, err
+	}
+	return &reply, nil
+}
+
+// LicensePut will configure the Consul Enterprise license for the target datacenter
+//
+// DEPRECATED: Consul 1.10 removes the corresponding HTTP endpoint as licenses
+// are now set via agent configuration instead of through the API
+func (op *Operator) LicensePut(license string, opts *WriteOptions) (*LicenseReply, error) {
+	var reply LicenseReply
+	r := op.c.newRequest("PUT", "/v1/operator/license")
+	r.setWriteOptions(opts)
+	r.body = strings.NewReader(license)
+	_, resp, err := op.c.doRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, err
+	}
+
+	if err := decodeBody(resp, &reply); err != nil {
+		return nil, err
+	}
+
+	return &reply, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go
new file mode 100644
index 0000000000..f0f5794aa5
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go
@@ -0,0 +1,132 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+// RaftServer has information about a server in the Raft configuration.
+type RaftServer struct {
+	// ID is the unique ID for the server. These are currently the same
+	// as the address, but they will be changed to a real GUID in a future
+	// release of Consul.
+	ID string
+
+	// Node is the node name of the server, as known by Consul, or this
+	// will be set to "(unknown)" otherwise.
+	Node string
+
+	// Address is the IP:port of the server, used for Raft communications.
+	Address string
+
+	// Leader is true if this server is the current cluster leader.
+	Leader bool
+
+	// Protocol version is the raft protocol version used by the server
+	ProtocolVersion string
+
+	// Voter is true if this server has a vote in the cluster. This might
+	// be false if the server is staging and still coming online, or if
+	// it's a non-voting server, which will be added in a future release of
+	// Consul.
+	Voter bool
+
+	// LastIndex is the last log index this server has a record of in its Raft log.
+	LastIndex uint64
+}
+
+// RaftConfiguration is returned when querying for the current Raft configuration.
+type RaftConfiguration struct {
+	// Servers has the list of servers in the Raft configuration.
+	Servers []*RaftServer
+
+	// Index has the Raft index of this configuration.
+	Index uint64
+}
+
+// TransferLeaderResponse is returned when requesting a transfer of the Raft
+// leadership to another server.
+type TransferLeaderResponse struct {
+	Success bool
+}
+
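RaftGetConfiguration, defined just below, returns the structures above in one call, which makes quick quorum inspection straightforward. A sketch, again against an assumed op handle inside a function returning error:

    cfg, err := op.RaftGetConfiguration(nil)
    if err != nil {
        return err
    }
    for _, s := range cfg.Servers {
        fmt.Printf("%s %s leader=%v voter=%v\n", s.Node, s.Address, s.Leader, s.Voter)
    }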
+// RaftGetConfiguration is used to query the current Raft peer set.
+func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
+	r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
+	r.setQueryOptions(q)
+	_, resp, err := op.c.doRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, err
+	}
+
+	var out RaftConfiguration
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+// RaftLeaderTransfer is used to transfer the current raft leader to another node.
+// Optionally accepts a non-empty id of another node to transfer leadership to.
+func (op *Operator) RaftLeaderTransfer(id string, q *QueryOptions) (*TransferLeaderResponse, error) {
+	r := op.c.newRequest("POST", "/v1/operator/raft/transfer-leader")
+	r.setQueryOptions(q)
+
+	if id != "" {
+		r.params.Set("id", id)
+	}
+	_, resp, err := op.c.doRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, err
+	}
+
+	var out TransferLeaderResponse
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by address in the form of
+// "IP:port".
+func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	r.setWriteOptions(q)
+
+	r.params.Set("address", address)
+
+	_, resp, err := op.c.doRequest(r)
+	if err != nil {
+		return err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return err
+	}
+	return nil
+}
+
+// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by ID.
+func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	r.setWriteOptions(q)
+
+	r.params.Set("id", id)
+
+	_, resp, err := op.c.doRequest(r)
+	if err != nil {
+		return err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go
new file mode 100644
index 0000000000..6115a7ab4b
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_segment.go
@@ -0,0 +1,14 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+// SegmentList returns all the available LAN segments.
+func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) {
+	var out []string
+	qm, err := op.c.query("/v1/operator/segment", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_usage.go b/vendor/github.com/hashicorp/consul/api/operator_usage.go
new file mode 100644
index 0000000000..8977449ddd
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_usage.go
@@ -0,0 +1,57 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+type Usage struct {
+	// Usage is a map of datacenter -> usage information
+	Usage map[string]ServiceUsage
+}
+
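The two removal helpers above differ only in which query parameter identifies the stale peer. A cautious operator script might prefer the ID form and fall back to the address form for older servers that predate stable IDs. A sketch; both values are hypothetical placeholders:

    // Hypothetical values: a stale server's ID and its advertised address.
    staleID := "11111111-2222-3333-4444-555555555555"
    staleAddr := "10.0.0.9:8300"

    // Prefer removal by ID; fall back to IP:port for servers without stable IDs.
    if err := op.RaftRemovePeerByID(staleID, nil); err != nil {
        if err := op.RaftRemovePeerByAddress(staleAddr, nil); err != nil {
            return err
        }
    }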
+// ServiceUsage contains information about the number of services and service instances for a datacenter.
+type ServiceUsage struct {
+	Nodes                   int
+	Services                int
+	ServiceInstances        int
+	ConnectServiceInstances map[string]int
+
+	// Billable services are of "typical" service kind (i.e. non-connect or connect-native),
+	// excluding the "consul" service.
+	BillableServiceInstances int
+
+	// A map of partition+namespace to number of unique services registered in that namespace
+	PartitionNamespaceServices map[string]map[string]int
+
+	// A map of partition+namespace to number of service instances registered in that namespace
+	PartitionNamespaceServiceInstances map[string]map[string]int
+
+	// A map of partition+namespace+kind to number of service-mesh instances registered in that namespace
+	PartitionNamespaceConnectServiceInstances map[string]map[string]map[string]int
+
+	// A map of partition+namespace to number of billable instances registered in that namespace
+	PartitionNamespaceBillableServiceInstances map[string]map[string]int
+}
+
+// Usage is used to query for usage information in the given datacenter.
+func (op *Operator) Usage(q *QueryOptions) (*Usage, *QueryMeta, error) {
+	r := op.c.newRequest("GET", "/v1/operator/usage")
+	r.setQueryOptions(q)
+	rtt, resp, err := op.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out *Usage
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/partition.go b/vendor/github.com/hashicorp/consul/api/partition.go
new file mode 100644
index 0000000000..8467c31189
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/partition.go
@@ -0,0 +1,167 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+// Partition is the configuration of a single admin partition. Admin Partitions are a Consul Enterprise feature.
+type Partition struct {
+	// Name is the name of the Partition.
+	Name string `json:"Name"`
+
+	// Description is where the user puts any information they want
+	// about the admin partition. It is not used internally.
+	Description string `json:"Description,omitempty"`
+
+	// DeletedAt is the time when the Partition was marked for deletion
+	// This is nullable so that we can omit if empty when encoding in JSON
+	DeletedAt *time.Time `json:"DeletedAt,omitempty" alias:"deleted_at"`
+
+	// CreateIndex is the Raft index at which the Partition was created
+	CreateIndex uint64 `json:"CreateIndex,omitempty"`
+
+	// ModifyIndex is the latest Raft index at which the Partition was modified.
+	ModifyIndex uint64 `json:"ModifyIndex,omitempty"`
+}
+
+// PartitionDefaultName is the default partition value.
+const PartitionDefaultName = "default"
+
+// Partitions can be used to manage Partitions in Consul Enterprise.
+type Partitions struct {
+	c *Client
+}
+
+// Partitions returns a handle to the partitions endpoints.
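The usage endpoint above aggregates per-datacenter counts into those nested maps. A short sketch of printing billable instances per datacenter, with the same assumed op handle:

    usage, _, err := op.Usage(nil)
    if err != nil {
        return err
    }
    for dc, u := range usage.Usage {
        fmt.Printf("%s: %d nodes, %d billable service instances\n",
            dc, u.Nodes, u.BillableServiceInstances)
    }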
+func (c *Client) Partitions() *Partitions { + return &Partitions{c} +} + +func (p *Partitions) Create(ctx context.Context, partition *Partition, q *WriteOptions) (*Partition, *WriteMeta, error) { + if partition.Name == "" { + return nil, nil, fmt.Errorf("Must specify a Name for Partition creation") + } + + r := p.c.newRequest("PUT", "/v1/partition") + r.setWriteOptions(q) + r.ctx = ctx + r.obj = partition + rtt, resp, err := p.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + var out Partition + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +func (p *Partitions) Update(ctx context.Context, partition *Partition, q *WriteOptions) (*Partition, *WriteMeta, error) { + if partition.Name == "" { + return nil, nil, fmt.Errorf("Must specify a Name for Partition updating") + } + + r := p.c.newRequest("PUT", "/v1/partition/"+partition.Name) + r.setWriteOptions(q) + r.ctx = ctx + r.obj = partition + rtt, resp, err := p.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + var out Partition + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +func (p *Partitions) Read(ctx context.Context, name string, q *QueryOptions) (*Partition, *QueryMeta, error) { + var out Partition + r := p.c.newRequest("GET", "/v1/partition/"+name) + r.setQueryOptions(q) + r.ctx = ctx + rtt, resp, err := p.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + found, resp, err := requireNotFoundOrOK(resp) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +func (p *Partitions) Delete(ctx context.Context, name string, q *WriteOptions) (*WriteMeta, error) { + r := p.c.newRequest("DELETE", "/v1/partition/"+name) + r.setWriteOptions(q) + r.ctx = ctx + rtt, resp, err := p.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +func (p *Partitions) List(ctx context.Context, q *QueryOptions) ([]*Partition, *QueryMeta, error) { + var out []*Partition + r := p.c.newRequest("GET", "/v1/partitions") + r.setQueryOptions(q) + r.ctx = ctx + rtt, resp, err := p.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/peering.go b/vendor/github.com/hashicorp/consul/api/peering.go new file mode 100644 index 0000000000..dd7780f630 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/peering.go @@ -0,0 +1,295 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+// PeeringState enumerates all the states a peering can be in
+type PeeringState string
+
+const (
+	// PeeringStateUndefined represents an unset value for PeeringState during
+	// writes.
+	PeeringStateUndefined PeeringState = "UNDEFINED"
+
+	// PeeringStatePending means the peering was created by generating a peering token.
+	// Peerings stay in a pending state until the peer uses the token to dial
+	// the local cluster.
+	PeeringStatePending PeeringState = "PENDING"
+
+	// PeeringStateEstablishing means the peering is being established from a peering token.
+	// This is the initial state for dialing peers.
+	PeeringStateEstablishing PeeringState = "ESTABLISHING"
+
+	// PeeringStateActive means that the peering connection is active and
+	// healthy.
+	PeeringStateActive PeeringState = "ACTIVE"
+
+	// PeeringStateFailing means the peering connection has been interrupted
+	// but has not yet been terminated.
+	PeeringStateFailing PeeringState = "FAILING"
+
+	// PeeringStateDeleting means a peering was marked for deletion and is in the process
+	// of being deleted.
+	PeeringStateDeleting PeeringState = "DELETING"
+
+	// PeeringStateTerminated means the peering relationship has been removed.
+	PeeringStateTerminated PeeringState = "TERMINATED"
+)
+
+type PeeringRemoteInfo struct {
+	// Partition is the remote peer's partition.
+	Partition string
+	// Datacenter is the remote peer's datacenter.
+	Datacenter string
+	Locality   *Locality `json:",omitempty"`
+}
+
+// Locality identifies where a given entity is running.
+type Locality struct {
+	// Region is the region the zone belongs to.
+	Region string
+
+	// Zone is the zone the entity is running in.
+	Zone string
+}
+
+type Peering struct {
+	// ID is a datacenter-scoped UUID for the peering.
+	ID string
+	// Name is the local alias for the peering relationship.
+	Name string
+	// Partition is the local partition connecting to the peer.
+	Partition string `json:",omitempty"`
+	// DeletedAt is the time when the Peering was marked for deletion
+	DeletedAt *time.Time `json:",omitempty" alias:"deleted_at"`
+	// Meta is a mapping of some string value to any other string value
+	Meta map[string]string `json:",omitempty"`
+	// State is one of the valid PeeringState values to represent the status of the
+	// peering relationship.
+	State PeeringState
+	// PeerID is the ID that our peer assigned to this peering. This ID is to
+	// be used when dialing the peer, so that it can know who dialed it.
+	PeerID string `json:",omitempty"`
+	// PeerCAPems contains all the CA certificates for the remote peer.
+	PeerCAPems []string `json:",omitempty"`
+	// PeerServerName is the name of the remote server as it relates to TLS.
+	PeerServerName string `json:",omitempty"`
+	// PeerServerAddresses contains all the connection addresses for the remote peer.
+	PeerServerAddresses []string `json:",omitempty"`
+	// StreamStatus contains information computed on read based on the state of the stream.
+	StreamStatus PeeringStreamStatus
+	// CreateIndex is the Raft index at which the Peering was created.
+	CreateIndex uint64
+	// ModifyIndex is the latest Raft index at which the Peering was modified.
+	ModifyIndex uint64
+	// Remote contains metadata for the remote peer.
+	Remote PeeringRemoteInfo
+}
+
+type PeeringStreamStatus struct {
+	// ImportedServices is the list of services imported from this peering.
+	ImportedServices []string
+	// ExportedServices is the list of services exported to this peering.
+	ExportedServices []string
+	// LastHeartbeat represents when the last heartbeat message was received.
+	LastHeartbeat *time.Time
+	// LastReceive represents when any message was last received, regardless of success or error.
+	LastReceive *time.Time
+	// LastSend represents when any message was last sent, regardless of success or error.
+	LastSend *time.Time
+}
+
+type PeeringReadResponse struct {
+	Peering *Peering
+}
+
+type PeeringGenerateTokenRequest struct {
+	// PeerName is the name of the remote peer.
+	PeerName string
+	// Partition to be peered.
+	Partition string `json:",omitempty"`
+	// Meta is a mapping of some string value to any other string value
+	Meta map[string]string `json:",omitempty"`
+	// ServerExternalAddresses is a list of addresses to put into the generated token. This could be used to specify
+	// load balancer(s) or external IPs to reach the servers from the dialing side, and will override any server
+	// addresses obtained from the "consul" service.
+	ServerExternalAddresses []string `json:",omitempty"`
+}
+
+type PeeringGenerateTokenResponse struct {
+	// PeeringToken is an opaque string provided to the remote peer for it to complete
+	// the peering initialization handshake.
+	PeeringToken string
+}
+
+type PeeringEstablishRequest struct {
+	// Name of the remote peer.
+	PeerName string
+	// The peering token returned from the peer's GenerateToken endpoint.
+	PeeringToken string `json:",omitempty"`
+	// Partition to be peered.
+	Partition string `json:",omitempty"`
+	// Meta is a mapping of some string value to any other string value
+	Meta map[string]string `json:",omitempty"`
+}
+
+type PeeringEstablishResponse struct {
+}
+
+type PeeringListRequest struct {
+	// future proofing in case we extend List functionality
+}
+
+type Peerings struct {
+	c *Client
+}
+
+// Peerings returns a handle to the peering endpoints.
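Establishing a peering is a two-cluster handshake: the accepting side generates a token and the dialing side presents it to Establish, both defined just below. A hedged sketch with two clients, one per cluster; the client variables, peer names, and token transfer mechanism are all assumptions:

    ctx := context.Background()

    // On the accepting cluster:
    tok, _, err := accepting.Peerings().GenerateToken(ctx,
        api.PeeringGenerateTokenRequest{PeerName: "cluster-02"}, nil)
    if err != nil {
        return err
    }

    // On the dialing cluster, after transferring the token out of band:
    _, _, err = dialing.Peerings().Establish(ctx,
        api.PeeringEstablishRequest{PeerName: "cluster-01", PeeringToken: tok.PeeringToken}, nil)
    if err != nil {
        return err
    }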
+func (c *Client) Peerings() *Peerings { + return &Peerings{c: c} +} + +func (p *Peerings) Read(ctx context.Context, name string, q *QueryOptions) (*Peering, *QueryMeta, error) { + if name == "" { + return nil, nil, fmt.Errorf("peering name cannot be empty") + } + + req := p.c.newRequest("GET", fmt.Sprintf("/v1/peering/%s", name)) + req.setQueryOptions(q) + req.ctx = ctx + + rtt, resp, err := p.c.doRequest(req) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + found, resp, err := requireNotFoundOrOK(resp) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out Peering + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +func (p *Peerings) Delete(ctx context.Context, name string, q *WriteOptions) (*WriteMeta, error) { + if name == "" { + return nil, fmt.Errorf("peering name cannot be empty") + } + + req := p.c.newRequest("DELETE", fmt.Sprintf("/v1/peering/%s", name)) + req.setWriteOptions(q) + req.ctx = ctx + + rtt, resp, err := p.c.doRequest(req) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// TODO(peering): verify this is the ultimate signature we want +func (p *Peerings) GenerateToken(ctx context.Context, g PeeringGenerateTokenRequest, wq *WriteOptions) (*PeeringGenerateTokenResponse, *WriteMeta, error) { + if g.PeerName == "" { + return nil, nil, fmt.Errorf("peer name cannot be empty") + } + + req := p.c.newRequest("POST", fmt.Sprint("/v1/peering/token")) + req.setWriteOptions(wq) + req.ctx = ctx + req.obj = g + + rtt, resp, err := p.c.doRequest(req) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + + var out PeeringGenerateTokenResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// TODO(peering): verify this is the ultimate signature we want +func (p *Peerings) Establish(ctx context.Context, i PeeringEstablishRequest, wq *WriteOptions) (*PeeringEstablishResponse, *WriteMeta, error) { + req := p.c.newRequest("POST", fmt.Sprint("/v1/peering/establish")) + req.setWriteOptions(wq) + req.ctx = ctx + req.obj = i + + rtt, resp, err := p.c.doRequest(req) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + + var out PeeringEstablishResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +func (p *Peerings) List(ctx context.Context, q *QueryOptions) ([]*Peering, *QueryMeta, error) { + req := p.c.newRequest("GET", "/v1/peerings") + req.setQueryOptions(q) + req.ctx = ctx + + rtt, resp, err := p.c.doRequest(req) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Peering + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go 
b/vendor/github.com/hashicorp/consul/api/prepared_query.go
new file mode 100644
index 0000000000..8ebc852f3a
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go
@@ -0,0 +1,269 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+// QueryFailoverOptions sets options about how we fail over if there are no
+// healthy nodes in the local datacenter.
+type QueryFailoverOptions struct {
+	// NearestN is set to the number of remote datacenters to try, based on
+	// network coordinates.
+	NearestN int
+
+	// Datacenters is a fixed list of datacenters to try after NearestN. We
+	// never try a datacenter multiple times, so those are subtracted from
+	// this list before proceeding.
+	Datacenters []string
+
+	// Targets is a fixed list of datacenters and peers to try. This field cannot
+	// be populated with NearestN or Datacenters.
+	Targets []QueryFailoverTarget
+}
+
+// Deprecated: use QueryFailoverOptions instead.
+type QueryDatacenterOptions = QueryFailoverOptions
+
+type QueryFailoverTarget struct {
+	// Peer specifies a peer to try during failover.
+	Peer string
+
+	// Datacenter specifies a datacenter to try during failover.
+	Datacenter string
+
+	// Partition specifies a partition to try during failover
+	// Note: Partitions are available only in Consul Enterprise
+	Partition string `json:",omitempty"`
+
+	// Namespace specifies a namespace to try during failover
+	// Note: Namespaces are available only in Consul Enterprise
+	Namespace string `json:",omitempty"`
+}
+
+// QueryDNSOptions controls settings when query results are served over DNS.
+type QueryDNSOptions struct {
+	// TTL is the time to live for the served DNS results.
+	TTL string
+}
+
+// ServiceQuery is used to query for a set of healthy nodes offering a specific
+// service.
+type ServiceQuery struct {
+	// Service is the service to query.
+	Service string
+
+	// SamenessGroup specifies a sameness group to query. The first member of the Sameness Group will
+	// be targeted first on PQ execution and subsequent members will be targeted during failover scenarios.
+	// This field is mutually exclusive with Failover.
+	SamenessGroup string `json:",omitempty"`
+
+	// Namespace of the service to query
+	Namespace string `json:",omitempty"`
+
+	// Partition of the service to query
+	Partition string `json:",omitempty"`
+
+	// Near allows baking in the name of a node to automatically distance-
+	// sort from. The magic "_agent" value is supported, which sorts near
+	// the agent which initiated the request by default.
+	Near string
+
+	// Failover controls what we do if there are no healthy nodes in the
+	// local datacenter.
+	Failover QueryFailoverOptions `json:",omitempty"`
+
+	// IgnoreCheckIDs is an optional list of health check IDs to ignore when
+	// considering which nodes are healthy. It is useful as an emergency measure
+	// to temporarily override some health check that is producing false negatives
+	// for example.
+	IgnoreCheckIDs []string
+
+	// If OnlyPassing is true then we will only include nodes with passing
+	// health checks (critical AND warning checks will cause a node to be
+	// discarded)
+	OnlyPassing bool
+
+	// Tags are a set of required and/or disallowed tags. If a tag is in
+	// this list it must be present. If the tag is preceded with "!" then
+	// it is disallowed.
+	Tags []string
+
+	// NodeMeta is a map of required node metadata fields.
If a key/value
+	// pair is in this map it must be present on the node in order for the
+	// service entry to be returned.
+	NodeMeta map[string]string
+
+	// ServiceMeta is a map of required service metadata fields. If a key/value
+	// pair is in this map it must be present on the node in order for the
+	// service entry to be returned.
+	ServiceMeta map[string]string
+
+	// Connect if true will filter the prepared query results to only
+	// include Connect-capable services. These include both native services
+	// and proxies for matching services. Note that if a proxy matches,
+	// the constraints in the query above (Near, OnlyPassing, etc.) apply
+	// to the _proxy_ and not the service being proxied. In practice, proxies
+	// should be directly next to their services so this isn't an issue.
+	Connect bool
+}
+
+// QueryTemplate carries the arguments for creating a templated query.
+type QueryTemplate struct {
+	// Type specifies the type of the query template. Currently only
+	// "name_prefix_match" is supported. This field is required.
+	Type string
+
+	// Regexp allows specifying a regex pattern to match against the name
+	// of the query being executed.
+	Regexp string
+
+	// RemoveEmptyTags if set to true, will cause the Tags list inside
+	// the Service structure to be stripped of any empty strings. This is useful
+	// when interpolating into tags in a way where the tag is optional, and
+	// where searching for an empty tag would yield no results from the query.
+	RemoveEmptyTags bool
+}
+
+// PreparedQueryDefinition defines a complete prepared query.
+type PreparedQueryDefinition struct {
+	// ID is the UUID-based ID for the query, always generated by Consul.
+	ID string
+
+	// Name is an optional friendly name for the query supplied by the
+	// user. NOTE - if this feature is used then it will reduce the security
+	// of any read ACL associated with this query/service since this name
+	// can be used to locate nodes without supplying any ACL.
+	Name string
+
+	// Session is an optional session to tie this query's lifetime to. If
+	// this is omitted then the query will not expire.
+	Session string
+
+	// Token is the ACL token used when the query was created, and it is
+	// used when a query is subsequently executed. This token, or a token
+	// with management privileges, must be used to change the query later.
+	Token string
+
+	// Service defines a service query (leaving things open for other types
+	// later).
+	Service ServiceQuery
+
+	// DNS has options that control how the results of this query are
+	// served over DNS.
+	DNS QueryDNSOptions
+
+	// Template is used to pass through the arguments for creating a
+	// prepared query with an attached template. If a template is given,
+	// interpolations are possible in other struct fields.
+	Template QueryTemplate
+}
+
+// PreparedQueryExecuteResponse has the results of executing a query.
+type PreparedQueryExecuteResponse struct {
+	// Service is the service that was queried.
+	Service string
+
+	// Namespace of the service that was queried
+	Namespace string `json:",omitempty"`
+
+	// Nodes has the nodes that were output by the query.
+	Nodes []ServiceEntry
+
+	// DNS has the options for serving these results over DNS.
+	DNS QueryDNSOptions
+
+	// Datacenter is the datacenter that these results came from.
+	Datacenter string
+
+	// Failovers is a count of how many times we had to query a remote
+	// datacenter.
+	Failovers int
+}
+
+// PreparedQuery can be used to query the prepared query endpoints.
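Putting the definition types together: a query that selects passing "web" instances, distance-sorted from the requesting agent, created and executed through the methods defined just below. A sketch; the client handle and service name are assumptions:

    pq := client.PreparedQuery()
    id, _, err := pq.Create(&api.PreparedQueryDefinition{
        Name: "nearest-web",
        Service: api.ServiceQuery{
            Service:     "web",
            OnlyPassing: true,
            Near:        "_agent",
        },
    }, nil)
    if err != nil {
        return err
    }

    res, _, err := pq.Execute(id, nil)
    if err != nil {
        return err
    }
    fmt.Printf("%d nodes from %s\n", len(res.Nodes), res.Datacenter)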
+type PreparedQuery struct { + c *Client +} + +// PreparedQuery returns a handle to the prepared query endpoints. +func (c *Client) PreparedQuery() *PreparedQuery { + return &PreparedQuery{c} +} + +// Create makes a new prepared query. The ID of the new query is returned. +func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) { + r := c.c.newRequest("POST", "/v1/query") + r.setWriteOptions(q) + r.obj = query + rtt, resp, err := c.c.doRequest(r) + if err != nil { + return "", nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return "", nil, err + } + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update makes updates to an existing prepared query. +func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) { + return c.c.write("/v1/query/"+query.ID, query, nil, q) +} + +// List is used to fetch all the prepared queries (always requires a management +// token). +func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Get is used to fetch a specific prepared query. +func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query/"+queryID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Delete is used to delete a specific prepared query. +func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("DELETE", "/v1/query/"+queryID) + r.setWriteOptions(q) + rtt, resp, err := c.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// Execute is used to execute a specific prepared query. You can execute using +// a query ID or name. +func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) { + var out *PreparedQueryExecuteResponse + qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go new file mode 100644 index 0000000000..639513d29f --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/raw.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +// Raw can be used to do raw queries against custom endpoints +type Raw struct { + c *Client +} + +// Raw returns a handle to query endpoints +func (c *Client) Raw() *Raw { + return &Raw{c} +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions. 
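Raw is the escape hatch for endpoints that have no typed wrapper; the usual query and write conventions (QueryOptions, blocking queries, decode-into-interface) still apply through the Query method that follows. A minimal sketch against a real read-only endpoint, with the client handle assumed:

    var out map[string]interface{}
    if _, err := client.Raw().Query("/v1/agent/self", &out, nil); err != nil {
        return err
    }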
+func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+	return raw.c.query(endpoint, out, q)
+}
+
+// Write is used to do a PUT request against an endpoint
+// and serialize/deserialize using the standard Consul conventions.
+func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+	return raw.c.write(endpoint, in, out, q)
+}
diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go
new file mode 100644
index 0000000000..9d98ff5c29
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/semaphore.go
@@ -0,0 +1,533 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"path"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
+	DefaultSemaphoreSessionName = "Consul API Semaphore"
+
+	// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Semaphore. This is used because we do not have any
+	// other check to depend upon.
+	DefaultSemaphoreSessionTTL = "15s"
+
+	// DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Semaphore acquisition.
+	DefaultSemaphoreWaitTime = 15 * time.Second
+
+	// DefaultSemaphoreKey is the key used within the prefix to
+	// use for coordination between all the contenders.
+	DefaultSemaphoreKey = ".lock"
+
+	// SemaphoreFlagValue is a magic flag we set to indicate a key
+	// is being used for a semaphore. It is used to detect a potential
+	// conflict with a lock.
+	SemaphoreFlagValue = 0xe0f69a2baa414de0
+)
+
+var (
+	// ErrSemaphoreHeld is returned if we attempt to double lock
+	ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
+
+	// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
+	// that we do not hold.
+	ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
+
+	// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
+	// that is in use.
+	ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
+
+	// ErrSemaphoreConflict is returned if the flags on a key
+	// used for a semaphore do not match expectation
+	ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
+)
+
+// Semaphore is used to implement a distributed semaphore
+// using the Consul KV primitives.
+type Semaphore struct {
+	c    *Client
+	opts *SemaphoreOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// SemaphoreOptions is used to parameterize the Semaphore
+type SemaphoreOptions struct {
+	Prefix            string        // Must be set and have write permissions
+	Limit             int           // Must be set, and be positive
+	Value             []byte        // Optional, value to associate with the contender entry
+	Session           string        // Optional, created if not specified
+	SessionName       string        // Optional, defaults to DefaultSemaphoreSessionName
+	SessionTTL        string        // Optional, defaults to DefaultSemaphoreSessionTTL
+	MonitorRetries    int           // Optional, defaults to 0 which means no retries
+	MonitorRetryTime  time.Duration // Optional, defaults to DefaultMonitorRetryTime
+	SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
+	SemaphoreTryOnce  bool          // Optional, defaults to false which means try forever
+	Namespace         string        `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or "default" namespace
+}
+
+// semaphoreLock is written under the DefaultSemaphoreKey and
+// is used to coordinate between all the contenders.
+type semaphoreLock struct {
+	// Limit is the integer limit of holders. This is used to
+	// verify that all the holders agree on the value.
+	Limit int
+
+	// Holders is a list of all the semaphore holders.
+	// It maps the session ID to true. It is used as a set effectively.
+	Holders map[string]bool
+}
+
+// SemaphorePrefix is used to create a Semaphore which will operate
+// at the given KV prefix and uses the given limit for the semaphore.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders.
+func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
+	opts := &SemaphoreOptions{
+		Prefix: prefix,
+		Limit:  limit,
+	}
+	return c.SemaphoreOpts(opts)
+}
+
+// SemaphoreOpts is used to create a Semaphore with the given options.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders. If a Session is not provided, one will be created.
+func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
+	if opts.Prefix == "" {
+		return nil, fmt.Errorf("missing prefix")
+	}
+	if opts.Limit <= 0 {
+		return nil, fmt.Errorf("semaphore limit must be positive")
+	}
+	if opts.SessionName == "" {
+		opts.SessionName = DefaultSemaphoreSessionName
+	}
+	if opts.SessionTTL == "" {
+		opts.SessionTTL = DefaultSemaphoreSessionTTL
+	} else {
+		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+		}
+	}
+	if opts.MonitorRetryTime == 0 {
+		opts.MonitorRetryTime = DefaultMonitorRetryTime
+	}
+	if opts.SemaphoreWaitTime == 0 {
+		opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
+	}
+	s := &Semaphore{
+		c:    c,
+		opts: opts,
+	}
+	return s, nil
+}
+
+// Acquire attempts to reserve a slot in the semaphore, blocking until
+// success, interrupted via the stopCh or an error is encountered.
+// Providing a non-nil stopCh can be used to abort the attempt.
+// On success, a channel is returned that represents our slot.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the slot is held until Release() unless the Session is specifically
+// created without any associated health checks.
By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the session being lost. +func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return nil, ErrSemaphoreHeld + } + + // Check if we need to create a session first + s.lockSession = s.opts.Session + if s.lockSession == "" { + sess, err := s.createSession() + if err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } + + s.sessionRenew = make(chan struct{}) + s.lockSession = sess + session := s.c.Session() + go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !s.isHeld { + close(s.sessionRenew) + s.sessionRenew = nil + } + }() + } + + // Create the contender entry + kv := s.c.KV() + wOpts := WriteOptions{Namespace: s.opts.Namespace} + + made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), &wOpts) + if err != nil || !made { + return nil, fmt.Errorf("failed to make contender entry: %v", err) + } + + // Setup the query options + qOpts := QueryOptions{ + WaitTime: s.opts.SemaphoreWaitTime, + Namespace: s.opts.Namespace, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. + if s.opts.SemaphoreTryOnce && attempts > 0 { + elapsed := time.Since(start) + if elapsed > s.opts.SemaphoreWaitTime { + return nil, nil + } + + // Query wait time should not exceed the semaphore wait time + qOpts.WaitTime = s.opts.SemaphoreWaitTime - elapsed + } + attempts++ + + // Read the prefix + pairs, meta, err := kv.List(s.opts.Prefix, &qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read prefix: %v", err) + } + + // Decode the lock + lockPair := s.findLock(pairs) + if lockPair.Flags != SemaphoreFlagValue { + return nil, ErrSemaphoreConflict + } + lock, err := s.decodeLock(lockPair) + if err != nil { + return nil, err + } + + // Verify we agree with the limit + if lock.Limit != s.opts.Limit { + return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", + lock.Limit, s.opts.Limit) + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if the lock is held + if len(lock.Holders) >= lock.Limit { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Create a new lock with us as a holder + lock.Holders[s.lockSession] = true + newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) + if err != nil { + return nil, err + } + + // Attempt the acquisition + didSet, _, err := kv.CAS(newLock, &wOpts) + if err != nil { + return nil, fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + // Update failed, could have been a race with another contender, + // retry the operation + goto WAIT + } + + // Watch to ensure we maintain ownership of the slot + lockCh := make(chan struct{}) + go s.monitorLock(s.lockSession, lockCh) + + // Set that we own the lock + s.isHeld = true + + // Acquired! All done + return lockCh, nil +} + +// Release is used to voluntarily give up our semaphore slot. It is +// an error to call this if the semaphore has not been acquired. 
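In practice the Acquire contract above means a holder must keep watching the returned channel for as long as it does protected work. A usage sketch for a three-slot semaphore; the client handle, KV prefix, and the workDone completion channel are assumptions:

    sem, err := client.SemaphorePrefix("service/db/semaphore", 3)
    if err != nil {
        return err
    }

    slot, err := sem.Acquire(nil) // nil stopCh: block until a slot is free
    if err != nil {
        return err
    }
    defer sem.Release()

    select {
    case <-slot:
        // Slot lost (session invalidated, connectivity, etc.);
        // stop the protected work immediately.
    case <-workDone:
        // Finished normally; the deferred Release gives the slot back.
    }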
+func (s *Semaphore) Release() error {
+	// Hold the lock as we try to release
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	// Ensure the lock is actually held
+	if !s.isHeld {
+		return ErrSemaphoreNotHeld
+	}
+
+	// Set that we no longer own the lock
+	s.isHeld = false
+
+	// Stop the session renew
+	if s.sessionRenew != nil {
+		defer func() {
+			close(s.sessionRenew)
+			s.sessionRenew = nil
+		}()
+	}
+
+	// Get and clear the lock session
+	lockSession := s.lockSession
+	s.lockSession = ""
+
+	// Remove ourselves as a lock holder
+	kv := s.c.KV()
+	key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+
+	wOpts := WriteOptions{Namespace: s.opts.Namespace}
+	qOpts := QueryOptions{Namespace: s.opts.Namespace}
+
+READ:
+	pair, _, err := kv.Get(key, &qOpts)
+	if err != nil {
+		return err
+	}
+	if pair == nil {
+		pair = &KVPair{}
+	}
+	lock, err := s.decodeLock(pair)
+	if err != nil {
+		return err
+	}
+
+	// Create a new lock without us as a holder
+	if _, ok := lock.Holders[lockSession]; ok {
+		delete(lock.Holders, lockSession)
+		newLock, err := s.encodeLock(lock, pair.ModifyIndex)
+		if err != nil {
+			return err
+		}
+
+		// Swap the locks
+		didSet, _, err := kv.CAS(newLock, &wOpts)
+		if err != nil {
+			return fmt.Errorf("failed to update lock: %v", err)
+		}
+		if !didSet {
+			goto READ
+		}
+	}
+
+	// Destroy the contender entry
+	contenderKey := path.Join(s.opts.Prefix, lockSession)
+	if _, err := kv.Delete(contenderKey, &wOpts); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Destroy is used to clean up the semaphore entry. It is not necessary
+// to invoke it. It will fail if the semaphore is in use.
+func (s *Semaphore) Destroy() error {
+	// Hold the lock as we try to acquire
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	// Check if we already hold the semaphore
+	if s.isHeld {
+		return ErrSemaphoreHeld
+	}
+
+	// List the entries under the semaphore prefix
+	kv := s.c.KV()
+
+	q := QueryOptions{Namespace: s.opts.Namespace}
+	pairs, _, err := kv.List(s.opts.Prefix, &q)
+	if err != nil {
+		return fmt.Errorf("failed to read prefix: %v", err)
+	}
+
+	// Find the lock pair, bail if it doesn't exist
+	lockPair := s.findLock(pairs)
+	if lockPair.ModifyIndex == 0 {
+		return nil
+	}
+	if lockPair.Flags != SemaphoreFlagValue {
+		return ErrSemaphoreConflict
+	}
+
+	// Decode the lock
+	lock, err := s.decodeLock(lockPair)
+	if err != nil {
+		return err
+	}
+
+	// Prune the dead holders
+	s.pruneDeadHolders(lock, pairs)
+
+	// Check if there are any holders
+	if len(lock.Holders) > 0 {
+		return ErrSemaphoreInUse
+	}
+
+	// Attempt the delete
+	w := WriteOptions{Namespace: s.opts.Namespace}
+	didRemove, _, err := kv.DeleteCAS(lockPair, &w)
+	if err != nil {
+		return fmt.Errorf("failed to remove semaphore: %v", err)
+	}
+	if !didRemove {
+		return ErrSemaphoreInUse
+	}
+	return nil
+}
+
+// createSession is used to create a new managed session
+func (s *Semaphore) createSession() (string, error) {
+	session := s.c.Session()
+	se := &SessionEntry{
+		Name:     s.opts.SessionName,
+		TTL:      s.opts.SessionTTL,
+		Behavior: SessionBehaviorDelete,
+	}
+
+	w := WriteOptions{Namespace: s.opts.Namespace}
+	id, _, err := session.Create(se, &w)
+	if err != nil {
+		return "", err
+	}
+	return id, nil
+}
+
+// contenderEntry returns a formatted KVPair for the contender
+func (s *Semaphore) contenderEntry(session string) *KVPair {
+	return &KVPair{
+		Key:     path.Join(s.opts.Prefix, session),
+		Value:   s.opts.Value,
+		Session: session,
+		Flags:   SemaphoreFlagValue,
+	}
+}
+
+// findLock is used to find the KV Pair which is used for coordination
+func (s *Semaphore)
findLock(pairs KVPairs) *KVPair { + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) + for _, pair := range pairs { + if pair.Key == key { + return pair + } + } + return &KVPair{Flags: SemaphoreFlagValue} +} + +// decodeLock is used to decode a semaphoreLock from an +// entry in Consul +func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { + // Handle if there is no lock + if pair == nil || pair.Value == nil { + return &semaphoreLock{ + Limit: s.opts.Limit, + Holders: make(map[string]bool), + }, nil + } + + l := &semaphoreLock{} + if err := json.Unmarshal(pair.Value, l); err != nil { + return nil, fmt.Errorf("lock decoding failed: %v", err) + } + return l, nil +} + +// encodeLock is used to encode a semaphoreLock into a KVPair +// that can be PUT +func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { + enc, err := json.Marshal(l) + if err != nil { + return nil, fmt.Errorf("lock encoding failed: %v", err) + } + pair := &KVPair{ + Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), + Value: enc, + Flags: SemaphoreFlagValue, + ModifyIndex: oldIndex, + } + return pair, nil +} + +// pruneDeadHolders is used to remove all the dead lock holders +func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { + // Gather all the live holders + alive := make(map[string]struct{}, len(pairs)) + for _, pair := range pairs { + if pair.Session != "" { + alive[pair.Session] = struct{}{} + } + } + + // Remove any holders that are dead + for holder := range lock.Holders { + if _, ok := alive[holder]; !ok { + delete(lock.Holders, holder) + } + } +} + +// monitorLock is a long running routine to monitor a semaphore ownership +// It closes the stopCh if we lose our slot. +func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := s.c.KV() + opts := QueryOptions{ + RequireConsistent: true, + Namespace: s.opts.Namespace, + } +WAIT: + retries := s.opts.MonitorRetries +RETRY: + pairs, meta, err := kv.List(s.opts.Prefix, &opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. + if retries > 0 && IsRetryableError(err) { + time.Sleep(s.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + lockPair := s.findLock(pairs) + lock, err := s.decodeLock(lockPair) + if err != nil { + return + } + s.pruneDeadHolders(lock, pairs) + if _, ok := lock.Holders[session]; ok { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go new file mode 100644 index 0000000000..69fd77d279 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/session.go @@ -0,0 +1,246 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "errors" + "fmt" + "time" +) + +const ( + // SessionBehaviorRelease is the default behavior and causes + // all associated locks to be released on session invalidation. + SessionBehaviorRelease = "release" + + // SessionBehaviorDelete is new in Consul 0.5 and changes the + // behavior to delete all associated locks on session invalidation. + // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. 
+ SessionBehaviorDelete = "delete" +) + +var ErrSessionExpired = errors.New("session expired") + +// SessionEntry represents a session in consul +type SessionEntry struct { + CreateIndex uint64 + ID string + Name string + Node string + LockDelay time.Duration + Behavior string + TTL string + Namespace string `json:",omitempty"` + + // Deprecated for Consul Enterprise in v1.7.0. + Checks []string + + // NodeChecks and ServiceChecks are new in Consul 1.7.0. + // When associating checks with sessions, namespaces can be specified for service checks. + NodeChecks []string + ServiceChecks []ServiceCheck +} + +type ServiceCheck struct { + ID string + Namespace string +} + +// Session can be used to query the Session endpoints +type Session struct { + c *Client +} + +// Session returns a handle to the session endpoints +func (c *Client) Session() *Session { + return &Session{c} +} + +// CreateNoChecks is like Create but is used specifically to create +// a session with no associated health checks. +func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + body := make(map[string]interface{}) + body["NodeChecks"] = []string{} + if se != nil { + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(body, q) + +} + +// Create makes a new session. Providing a session entry can +// customize the session. It can also be nil to use defaults. +func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + var obj interface{} + if se != nil { + body := make(map[string]interface{}) + obj = body + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if len(se.Checks) > 0 { + body["Checks"] = se.Checks + } + if len(se.NodeChecks) > 0 { + body["NodeChecks"] = se.NodeChecks + } + if len(se.ServiceChecks) > 0 { + body["ServiceChecks"] = se.ServiceChecks + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(obj, q) +} + +func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { + var out struct{ ID string } + wm, err := s.c.write("/v1/session/create", obj, &out, q) + if err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Destroy invalidates a given session +func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Renew renews the TTL on a given session +func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/renew/"+id) + r.setWriteOptions(q) + rtt, resp, err := s.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + + wm := &WriteMeta{RequestTime: rtt} + + if resp.StatusCode == 404 { + return nil, wm, nil + } else if resp.StatusCode != 200 { + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + if 
len(entries) > 0 {
+		return entries[0], wm, nil
+	}
+	return nil, wm, nil
+}
+
+// RenewPeriodic is used to periodically invoke Session.Renew on a
+// session until a doneCh is closed. This is meant to be used in a
+// long-running goroutine to ensure a session stays valid.
+func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error {
+	ctx := q.Context()
+
+	ttl, err := time.ParseDuration(initialTTL)
+	if err != nil {
+		return err
+	}
+
+	waitDur := ttl / 2
+	lastRenewTime := time.Now()
+	var lastErr error
+	for {
+		if time.Since(lastRenewTime) > ttl {
+			return lastErr
+		}
+		select {
+		case <-time.After(waitDur):
+			entry, _, err := s.Renew(id, q)
+			if err != nil {
+				waitDur = time.Second
+				lastErr = err
+				continue
+			}
+			if entry == nil {
+				return ErrSessionExpired
+			}
+
+			// Handle the server updating the TTL
+			ttl, _ = time.ParseDuration(entry.TTL)
+			waitDur = ttl / 2
+			lastRenewTime = time.Now()
+
+		case <-doneCh:
+			// Attempt a session destroy
+			s.Destroy(id, q)
+			return nil
+
+		case <-ctx.Done():
+			// Bail immediately since attempting the destroy would
+			// use the canceled context in q, which would just bail.
+			return ctx.Err()
+		}
+	}
+}
+
+// Info looks up a single session
+func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// Node gets sessions for a node
+func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// List gets all active sessions
+func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/list", &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go
new file mode 100644
index 0000000000..bcc80e5b3d
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/snapshot.go
@@ -0,0 +1,57 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"io"
+)
+
+// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of
+// Consul's internal state and restore snapshots for disaster recovery.
+type Snapshot struct {
+	c *Client
+}
+
+// Snapshot returns a handle that exposes the snapshot endpoints.
+func (c *Client) Snapshot() *Snapshot {
+	return &Snapshot{c}
+}
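Before the Save implementation below, a brief usage sketch of the snapshot endpoints may help: Save hands back an io.ReadCloser that the caller must close, so a typical backup just streams it to disk. This assumes a reachable local agent; the output filename is illustrative:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Save streams a snapshot of Consul's internal state; closing the
	// returned ReadCloser is the caller's responsibility.
	snap, _, err := client.Snapshot().Save(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer snap.Close()

	out, err := os.Create("backup.snap")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, snap); err != nil {
		log.Fatal(err)
	}
	log.Println("snapshot written to backup.snap")
}
```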
+// Save requests a new snapshot and provides an io.ReadCloser with the snapshot
+// data to save. If this doesn't return an error, then it's the responsibility
+// of the caller to close it. Only a subset of the QueryOptions are supported:
+// Datacenter, AllowStale, and Token.
+func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
+	r := s.c.newRequest("GET", "/v1/snapshot")
+	r.setQueryOptions(q)
+
+	rtt, resp, err := s.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	if err := requireOK(resp); err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+	return resp.Body, qm, nil
+}
+
+// Restore streams in an existing snapshot and attempts to restore it.
+func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error {
+	r := s.c.newRequest("PUT", "/v1/snapshot")
+	r.body = in
+	r.header.Set("Content-Type", "application/octet-stream")
+	r.setWriteOptions(q)
+	_, resp, err := s.c.doRequest(r)
+	if err != nil {
+		return err
+	}
+	if err := requireOK(resp); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go
new file mode 100644
index 0000000000..8c52eb222b
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/status.go
@@ -0,0 +1,70 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+// Status can be used to query the Status endpoints
+type Status struct {
+	c *Client
+}
+
+// Status returns a handle to the status endpoints
+func (c *Client) Status() *Status {
+	return &Status{c}
+}
+
+// LeaderWithQueryOptions is used to query for a known leader
+func (s *Status) LeaderWithQueryOptions(q *QueryOptions) (string, error) {
+	r := s.c.newRequest("GET", "/v1/status/leader")
+
+	if q != nil {
+		r.setQueryOptions(q)
+	}
+
+	_, resp, err := s.c.doRequest(r)
+	if err != nil {
+		return "", err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return "", err
+	}
+
+	var leader string
+	if err := decodeBody(resp, &leader); err != nil {
+		return "", err
+	}
+	return leader, nil
+}
+
+func (s *Status) Leader() (string, error) {
+	return s.LeaderWithQueryOptions(nil)
+}
+
+// PeersWithQueryOptions is used to query for known raft peers
+func (s *Status) PeersWithQueryOptions(q *QueryOptions) ([]string, error) {
+	r := s.c.newRequest("GET", "/v1/status/peers")
+
+	if q != nil {
+		r.setQueryOptions(q)
+	}
+
+	_, resp, err := s.c.doRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer closeResponseBody(resp)
+	if err := requireOK(resp); err != nil {
+		return nil, err
+	}
+
+	var peers []string
+	if err := decodeBody(resp, &peers); err != nil {
+		return nil, err
+	}
+	return peers, nil
+}
+
+func (s *Status) Peers() ([]string, error) {
+	return s.PeersWithQueryOptions(nil)
+}
diff --git a/vendor/github.com/hashicorp/consul/api/txn.go b/vendor/github.com/hashicorp/consul/api/txn.go
new file mode 100644
index 0000000000..59adafdac3
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/txn.go
@@ -0,0 +1,249 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+// Txn is used to manipulate the Txn API
+type Txn struct {
+	c *Client
+}
+
+// Txn is used to return a handle to the transaction APIs
+func (c *Client) Txn() *Txn {
+	return &Txn{c}
+}
+
+// TxnOp is the internal format we send to Consul. K/V, node, service, and
+// check operations are supported.
+type TxnOp struct {
+	KV      *KVTxnOp
+	Node    *NodeTxnOp
+	Service *ServiceTxnOp
+	Check   *CheckTxnOp
+}
+
+// TxnOps is a list of transaction operations.
+type TxnOps []*TxnOp
+
+// TxnResult is the internal format we receive from Consul.
+type TxnResult struct { + KV *KVPair + Node *Node + Service *CatalogService + Check *HealthCheck +} + +// TxnResults is a list of TxnResult objects. +type TxnResults []*TxnResult + +// TxnError is used to return information about an operation in a transaction. +type TxnError struct { + OpIndex int + What string +} + +// TxnErrors is a list of TxnError objects. +type TxnErrors []*TxnError + +// TxnResponse is the internal format we receive from Consul. +type TxnResponse struct { + Results TxnResults + Errors TxnErrors +} + +// KVOp constants give possible operations available in a transaction. +type KVOp string + +const ( + KVSet KVOp = "set" + KVDelete KVOp = "delete" + KVDeleteCAS KVOp = "delete-cas" + KVDeleteTree KVOp = "delete-tree" + KVCAS KVOp = "cas" + KVLock KVOp = "lock" + KVUnlock KVOp = "unlock" + KVGet KVOp = "get" + KVGetOrEmpty KVOp = "get-or-empty" + KVGetTree KVOp = "get-tree" + KVCheckSession KVOp = "check-session" + KVCheckIndex KVOp = "check-index" + KVCheckNotExists KVOp = "check-not-exists" +) + +// KVTxnOp defines a single operation inside a transaction. +type KVTxnOp struct { + Verb KVOp + Key string + Value []byte + Flags uint64 + Index uint64 + Session string + Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` +} + +// KVTxnOps defines a set of operations to be performed inside a single +// transaction. +type KVTxnOps []*KVTxnOp + +// KVTxnResponse has the outcome of a transaction. +type KVTxnResponse struct { + Results []*KVPair + Errors TxnErrors +} + +// SessionOp constants give possible operations available in a transaction. +type SessionOp string + +const ( + SessionDelete SessionOp = "delete" +) + +// SessionTxnOp defines a single operation inside a transaction. +type SessionTxnOp struct { + Verb SessionOp + Session Session +} + +// NodeOp constants give possible operations available in a transaction. +type NodeOp string + +const ( + NodeGet NodeOp = "get" + NodeSet NodeOp = "set" + NodeCAS NodeOp = "cas" + NodeDelete NodeOp = "delete" + NodeDeleteCAS NodeOp = "delete-cas" +) + +// NodeTxnOp defines a single operation inside a transaction. +type NodeTxnOp struct { + Verb NodeOp + Node Node +} + +// ServiceOp constants give possible operations available in a transaction. +type ServiceOp string + +const ( + ServiceGet ServiceOp = "get" + ServiceSet ServiceOp = "set" + ServiceCAS ServiceOp = "cas" + ServiceDelete ServiceOp = "delete" + ServiceDeleteCAS ServiceOp = "delete-cas" +) + +// ServiceTxnOp defines a single operation inside a transaction. +type ServiceTxnOp struct { + Verb ServiceOp + Node string + Service AgentService +} + +// CheckOp constants give possible operations available in a transaction. +type CheckOp string + +const ( + CheckGet CheckOp = "get" + CheckSet CheckOp = "set" + CheckCAS CheckOp = "cas" + CheckDelete CheckOp = "delete" + CheckDeleteCAS CheckOp = "delete-cas" +) + +// CheckTxnOp defines a single operation inside a transaction. +type CheckTxnOp struct { + Verb CheckOp + Check HealthCheck +} + +// Txn is used to apply multiple Consul operations in a single, atomic transaction. +// +// Note that Go will perform the required base64 encoding on the values +// automatically because the type is a byte slice. Transactions are defined as a +// list of operations to perform, using the different fields in the TxnOp structure +// to define operations. If any operation fails, none of the changes are applied +// to the state store. 
+//
+// Even though this is generally a write operation, we take a QueryOptions input
+// and return a QueryMeta output. If the transaction contains only read ops, then
+// Consul will fast-path it to a different endpoint internally which supports
+// consistency controls, but not blocking. If there are write operations then
+// the request will always be routed through raft and any consistency settings
+// will be ignored.
+//
+// Here's an example:
+//
+//	ops := TxnOps{
+//		&TxnOp{
+//			KV: &KVTxnOp{
+//				Verb:    KVLock,
+//				Key:     "test/lock",
+//				Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
+//				Value:   []byte("hello"),
+//			},
+//		},
+//		&TxnOp{
+//			KV: &KVTxnOp{
+//				Verb: KVGet,
+//				Key:  "another/key",
+//			},
+//		},
+//		&TxnOp{
+//			Check: &CheckTxnOp{
+//				Verb: CheckSet,
+//				Check: HealthCheck{
+//					Node:    "foo",
+//					CheckID: "redis:a",
+//					Name:    "Redis Health Check",
+//					Status:  "passing",
+//				},
+//			},
+//		},
+//	}
+//	ok, response, _, err := client.Txn().Txn(ops, nil)
+//
+// If there is a problem making the transaction request then an error will be
+// returned. Otherwise, the ok value will be true if the transaction succeeded
+// or false if it was rolled back. The response is a structured return value which
+// will have the outcome of the transaction. Its Results member will have entries
+// for each operation. For KV operations, Deleted keys will have a nil entry in the
+// results, and to save space, the Value of each key in the Results will be nil
+// unless the operation is a KVGet. If the transaction was rolled back, the Errors
+// member will have entries referencing the index of the operation that failed
+// along with an error message.
+func (t *Txn) Txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) {
+	return t.c.txn(txn, q)
+}
+
+func (c *Client) txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) {
+	r := c.newRequest("PUT", "/v1/txn")
+	r.setQueryOptions(q)
+
+	r.obj = txn
+	rtt, resp, err := c.doRequest(r)
+	if err != nil {
+		return false, nil, nil, err
+	}
+	defer closeResponseBody(resp)
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
+		var txnResp TxnResponse
+		if err := decodeBody(resp, &txnResp); err != nil {
+			return false, nil, nil, err
+		}
+
+		return resp.StatusCode == http.StatusOK, &txnResp, qm, nil
+	}
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
+}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
new file mode 100644
index 0000000000..e87a115e46
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4.
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 0000000000..036e5313fc --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. 
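To make the README's point concrete, here is a minimal sketch of the two client constructors; the target URL is illustrative. The pooled variant reuses connections across requests to the same hosts, while the non-pooled variant disables keepalives and idle connections for one-off calls:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/hashicorp/go-cleanhttp"
)

func main() {
	// A pooled client reuses connections; suitable for repeated
	// requests to the same host(s).
	pooled := cleanhttp.DefaultPooledClient()

	// A non-pooled client disables keepalives and idle connections;
	// suitable for transient, one-off requests.
	oneShot := cleanhttp.DefaultClient()

	for _, c := range []*http.Client{pooled, oneShot} {
		resp, err := c.Get("https://example.com")
		if err != nil {
			fmt.Println("request failed:", err)
			continue
		}
		resp.Body.Close()
		fmt.Println("status:", resp.Status)
	}
}
```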
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
new file mode 100644
index 0000000000..fe28d15b6f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
@@ -0,0 +1,58 @@
+package cleanhttp
+
+import (
+	"net"
+	"net/http"
+	"runtime"
+	"time"
+)
+
+// DefaultTransport returns a new http.Transport with similar default values to
+// http.DefaultTransport, but with idle connections and keepalives disabled.
+func DefaultTransport() *http.Transport {
+	transport := DefaultPooledTransport()
+	transport.DisableKeepAlives = true
+	transport.MaxIdleConnsPerHost = -1
+	return transport
+}
+
+// DefaultPooledTransport returns a new http.Transport with similar default
+// values to http.DefaultTransport. Do not use this for transient transports as
+// it can leak file descriptors over time. Only use this for transports that
+// will be re-used for the same host(s).
+func DefaultPooledTransport() *http.Transport {
+	transport := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+			DualStack: true,
+		}).DialContext,
+		MaxIdleConns:          100,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+		ForceAttemptHTTP2:     true,
+		MaxIdleConnsPerHost:   runtime.GOMAXPROCS(0) + 1,
+	}
+	return transport
+}
+
+// DefaultClient returns a new http.Client with similar default values to
+// http.Client, but with a non-shared Transport, idle connections disabled, and
+// keepalives disabled.
+func DefaultClient() *http.Client {
+	return &http.Client{
+		Transport: DefaultTransport(),
+	}
+}
+
+// DefaultPooledClient returns a new http.Client with similar default values to
+// http.Client, but with a shared Transport. Do not use this function for
+// transient clients as it can leak file descriptors over time. Only use this
+// for clients that will be re-used for the same host(s).
+func DefaultPooledClient() *http.Client {
+	return &http.Client{
+		Transport: DefaultPooledTransport(),
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
new file mode 100644
index 0000000000..05841092a7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
@@ -0,0 +1,20 @@
+// Package cleanhttp offers convenience utilities for acquiring "clean"
+// http.Transport and http.Client structs.
+//
+// Values set on http.DefaultClient and http.DefaultTransport affect all
+// callers. This can have detrimental effects, especially in TLS contexts,
+// where client or root certificates set to talk to multiple endpoints can end
+// up displacing each other, leading to hard-to-debug issues. This package
+// provides non-shared http.Client and http.Transport structs to ensure that
+// the configuration will not be overwritten by other parts of the application
+// or dependencies.
+//
+// The DefaultClient and DefaultTransport functions disable idle connections
+// and keepalives. Without ensuring that idle connections are closed before
+// garbage collection, short-term clients/transports can leak file descriptors,
+// eventually leading to "too many open files" errors. If you will be
+// connecting to the same hosts repeatedly from the same client, you can use
+// DefaultPooledClient to receive a client that has connection pooling
+// semantics similar to http.DefaultClient.
+// +package cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go new file mode 100644 index 0000000000..3c845dc0dc --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go @@ -0,0 +1,48 @@ +package cleanhttp + +import ( + "net/http" + "strings" + "unicode" +) + +// HandlerInput provides input options to cleanhttp's handlers +type HandlerInput struct { + ErrStatus int +} + +// PrintablePathCheckHandler is a middleware that ensures the request path +// contains only printable runes. +func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { + // Nil-check on input to make it optional + if input == nil { + input = &HandlerInput{ + ErrStatus: http.StatusBadRequest, + } + } + + // Default to http.StatusBadRequest on error + if input.ErrStatus == 0 { + input.ErrStatus = http.StatusBadRequest + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r != nil { + // Check URL path for non-printable characters + idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + if idx != -1 { + w.WriteHeader(input.ErrStatus) + return + } + + if next != nil { + next.ServeHTTP(w, r) + } + } + + return + }) +} diff --git a/vendor/github.com/hashicorp/go-hclog/.gitignore b/vendor/github.com/hashicorp/go-hclog/.gitignore new file mode 100644 index 0000000000..42cc4105ff --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/.gitignore @@ -0,0 +1 @@ +.idea* \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE new file mode 100644 index 0000000000..9938fb50ee --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2017 HashiCorp, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md new file mode 100644 index 0000000000..21a17c5af3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -0,0 +1,148 @@ +# go-hclog + +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[godocs]: https://godoc.org/github.com/hashicorp/go-hclog + +`go-hclog` is a package for Go that provides a simple key/value logging +interface for use in development and production environments. 
+ +It provides logging levels that provide decreased output based upon the +desired amount of output, unlike the standard library `log` package. + +It provides `Printf` style logging of values via `hclog.Fmt()`. + +It provides a human readable output mode for use in development as well as +JSON output mode for production. + +## Stability Note + +This library has reached 1.0 stability. Its API can be considered solidified +and promised through future versions. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-hclog`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-hclog + +## Usage + +### Use the global logger + +```go +hclog.Default().Info("hello world") +``` + +```text +2017-07-05T16:15:55.167-0700 [INFO ] hello world +``` + +(Note timestamps are removed in future examples for brevity.) + +### Create a new logger + +```go +appLogger := hclog.New(&hclog.LoggerOptions{ + Name: "my-app", + Level: hclog.LevelFromString("DEBUG"), +}) +``` + +### Emit an Info level message with 2 key/value pairs + +```go +input := "5.5" +_, err := strconv.ParseInt(input, 10, 32) +if err != nil { + appLogger.Info("Invalid input for ParseInt", "input", input, "error", err) +} +``` + +```text +... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax" +``` + +### Create a new Logger for a major subsystem + +```go +subsystemLogger := appLogger.Named("transport") +subsystemLogger.Info("we are transporting something") +``` + +```text +... [INFO ] my-app.transport: we are transporting something +``` + +Notice that logs emitted by `subsystemLogger` contain `my-app.transport`, +reflecting both the application and subsystem names. + +### Create a new Logger with fixed key/value pairs + +Using `With()` will include a specific key-value pair in all messages emitted +by that logger. + +```go +requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363" +requestLogger := subsystemLogger.With("request", requestID) +requestLogger.Info("we are transporting a request") +``` + +```text +... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363 +``` + +This allows sub Loggers to be context specific without having to thread that +into all the callers. + +### Using `hclog.Fmt()` + +```go +totalBandwidth := 200 +appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth)) +``` + +```text +... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s" +``` + +### Use this with code that uses the standard library logger + +If you want to use the standard library's `log.Logger` interface you can wrap +`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use +it with the familiar `Println()`, `Printf()`, etc. For example: + +```go +stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{ + InferLevels: true, +}) +// Printf() is provided by stdlib log.Logger interface, not hclog.Logger +stdLogger.Printf("[DEBUG] %+v", stdLogger) +``` + +```text +... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]} +``` + +Alternatively, you may configure the system-wide logger: + +```go +// log the standard logger from 'import "log"' +log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true})) +log.SetPrefix("") +log.SetFlags(0) + +log.Printf("[DEBUG] %d", 42) +``` + +```text +... 
[DEBUG] my-app: 42 +``` + +Notice that if `appLogger` is initialized with the `INFO` log level _and_ you +specify `InferLevels: true`, you will not see any output here. You must change +`appLogger` to `DEBUG` to see output. See the docs for more information. + +If the log lines start with a timestamp you can use the +`InferLevelsWithTimestamp` option to try and ignore them. diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go new file mode 100644 index 0000000000..d00816b38f --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go @@ -0,0 +1,44 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + +//go:build !windows +// +build !windows + +package hclog + +import ( + "github.com/mattn/go-isatty" +) + +// hasFD is used to check if the writer has an Fd value to check +// if it's a terminal. +type hasFD interface { + Fd() uintptr +} + +// setColorization will mutate the values of this logger +// to appropriately configure colorization options. It provides +// a wrapper to the output stream on Windows systems. +func (l *intLogger) setColorization(opts *LoggerOptions) { + if opts.Color != AutoColor { + return + } + + if sc, ok := l.writer.w.(SupportsColor); ok { + if !sc.SupportsColor() { + l.headerColor = ColorOff + l.writer.color = ColorOff + } + return + } + + fi, ok := l.writer.w.(hasFD) + if !ok { + return + } + + if !isatty.IsTerminal(fi.Fd()) { + l.headerColor = ColorOff + l.writer.color = ColorOff + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go new file mode 100644 index 0000000000..2c3fb9ea6f --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + +//go:build windows +// +build windows + +package hclog + +import ( + "os" + + colorable "github.com/mattn/go-colorable" +) + +// setColorization will mutate the values of this logger +// to appropriately configure colorization options. It provides +// a wrapper to the output stream on Windows systems. +func (l *intLogger) setColorization(opts *LoggerOptions) { + if opts.Color == ColorOff { + return + } + + fi, ok := l.writer.w.(*os.File) + if !ok { + l.writer.color = ColorOff + l.headerColor = ColorOff + return + } + + cfi := colorable.NewColorable(fi) + + // NewColorable detects if color is possible and if it's not, then it + // returns the original value. So we can test if we got the original + // value back to know if color is possible. + if cfi == fi { + l.writer.color = ColorOff + l.headerColor = ColorOff + } else { + l.writer.w = cfi + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go new file mode 100644 index 0000000000..eb5aba556b --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/context.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + +package hclog + +import ( + "context" +) + +// WithContext inserts a logger into the context and is retrievable +// with FromContext. The optional args can be set with the same syntax as +// Logger.With to set fields on the inserted logger. This will not modify +// the logger argument in-place. 
+func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context {
+	// While we could call logger.With even with zero args, we have this
+	// check to avoid unnecessary allocations around creating a copy of a
+	// logger.
+	if len(args) > 0 {
+		logger = logger.With(args...)
+	}
+
+	return context.WithValue(ctx, contextKey, logger)
+}
+
+// FromContext returns a logger from the context. This will return L()
+// (the default logger) if no logger is found in the context. Therefore,
+// this will never return a nil value.
+func FromContext(ctx context.Context) Logger {
+	logger, _ := ctx.Value(contextKey).(Logger)
+	if logger == nil {
+		return L()
+	}
+
+	return logger
+}
+
+// Unexported new type so that our context key never collides with another.
+type contextKeyType struct{}
+
+// contextKey is the key used for the context to store the logger.
+var contextKey = contextKeyType{}
diff --git a/vendor/github.com/hashicorp/go-hclog/exclude.go b/vendor/github.com/hashicorp/go-hclog/exclude.go
new file mode 100644
index 0000000000..4b73ba553d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/exclude.go
@@ -0,0 +1,74 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MIT
+
+package hclog
+
+import (
+	"regexp"
+	"strings"
+)
+
+// ExcludeByMessage provides a simple way to build a list of log messages that
+// can be queried and matched. This is meant to be used with the Exclude
+// option on Options to suppress log messages. This does not hold any mutexes
+// within itself, so normal usage is to Add entries at setup and none once
+// Exclude is in use. Exclude is called with a mutex held within
+// the Logger, so it does not need a mutex of its own. Example usage:
+//
+//	f := new(ExcludeByMessage)
+//	f.Add("Noisy log message text")
+//	appLogger.Exclude = f.Exclude
type ExcludeByMessage struct {
+	messages map[string]struct{}
+}
+
+// Add a message to be filtered. Do not call this once Exclude may be
+// called, due to concurrency issues.
+func (f *ExcludeByMessage) Add(msg string) {
+	if f.messages == nil {
+		f.messages = make(map[string]struct{})
+	}
+
+	f.messages[msg] = struct{}{}
+}
+
+// Exclude returns true if the given message should be excluded
+func (f *ExcludeByMessage) Exclude(level Level, msg string, args ...interface{}) bool {
+	_, ok := f.messages[msg]
+	return ok
+}
+
+// ExcludeByPrefix is a simple type to match a message string that has a common prefix.
+type ExcludeByPrefix string
+
+// Exclude matches any message that starts with the prefix.
+func (p ExcludeByPrefix) Exclude(level Level, msg string, args ...interface{}) bool {
+	return strings.HasPrefix(msg, string(p))
+}
+
+// ExcludeByRegexp takes a regexp and uses it to match a log message string. If it matches
+// the log entry is excluded.
+type ExcludeByRegexp struct {
+	Regexp *regexp.Regexp
+}
+
+// Exclude the log message if the message string matches the regexp
+func (e ExcludeByRegexp) Exclude(level Level, msg string, args ...interface{}) bool {
+	return e.Regexp.MatchString(msg)
+}
+
+// ExcludeFuncs is a slice of functions that will be called to see if a log
+// entry should be filtered or not. It stops calling functions once at least
+// one returns true.
+type ExcludeFuncs []func(level Level, msg string, args ...interface{}) bool
+
+// Calls each function until one of them returns true
+func (ff ExcludeFuncs) Exclude(level Level, msg string, args ...interface{}) bool {
+	for _, f := range ff {
+		if f(level, msg, args...) {
{ + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go new file mode 100644 index 0000000000..a7403f593a --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -0,0 +1,67 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + +package hclog + +import ( + "sync" + "time" +) + +var ( + protect sync.Once + def Logger + + // DefaultOptions is used to create the Default logger. These are read + // only when the Default logger is created, so set them as soon as the + // process starts. + DefaultOptions = &LoggerOptions{ + Level: DefaultLevel, + Output: DefaultOutput, + TimeFn: time.Now, + } +) + +// Default returns a globally held logger. This can be a good starting +// place, and then you can use .With() and .Named() to create sub-loggers +// to be used in more specific contexts. +// The value of the Default logger can be set via SetDefault() or by +// changing the options in DefaultOptions. +// +// This method is goroutine safe, returning a global from memory, but +// care should be used if SetDefault() is called it random times +// in the program as that may result in race conditions and an unexpected +// Logger being returned. +func Default() Logger { + protect.Do(func() { + // If SetDefault was used before Default() was called, we need to + // detect that here. + if def == nil { + def = New(DefaultOptions) + } + }) + + return def +} + +// L is a short alias for Default(). +func L() Logger { + return Default() +} + +// SetDefault changes the logger to be returned by Default()and L() +// to the one given. This allows packages to use the default logger +// and have higher level packages change it to match the execution +// environment. It returns any old default if there is one. +// +// NOTE: This is expected to be called early in the program to setup +// a default logger. As such, it does not attempt to make itself +// not racy with regard to the value of the default logger. Ergo +// if it is called in goroutines, you may experience race conditions +// with other goroutines retrieving the default logger. Basically, +// don't do that. +func SetDefault(log Logger) Logger { + old := def + def = log + return old +} diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go new file mode 100644 index 0000000000..e9b1c18853 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -0,0 +1,207 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + +package hclog + +import ( + "io" + "log" + "sync" + "sync/atomic" +) + +var _ Logger = &interceptLogger{} + +type interceptLogger struct { + Logger + + mu *sync.Mutex + sinkCount *int32 + Sinks map[SinkAdapter]struct{} +} + +func NewInterceptLogger(opts *LoggerOptions) InterceptLogger { + l := newLogger(opts) + if l.callerOffset > 0 { + // extra frames for interceptLogger.{Warn,Info,Log,etc...}, and interceptLogger.log + l.callerOffset += 2 + } + intercept := &interceptLogger{ + Logger: l, + mu: new(sync.Mutex), + sinkCount: new(int32), + Sinks: make(map[SinkAdapter]struct{}), + } + + atomic.StoreInt32(intercept.sinkCount, 0) + + return intercept +} + +func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) { + i.log(level, msg, args...) +} + +// log is used to make the caller stack frame lookup consistent. 
+// If Warn, Info, etc. all called Log, then direct calls to Log would have a
+// different stack frame depth. By having all the methods call the same
+// helper we ensure the stack frame depth is the same.
+func (i *interceptLogger) log(level Level, msg string, args ...interface{}) {
+	i.Logger.Log(level, msg, args...)
+	if atomic.LoadInt32(i.sinkCount) == 0 {
+		return
+	}
+
+	i.mu.Lock()
+	defer i.mu.Unlock()
+	for s := range i.Sinks {
+		s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...)
+	}
+}
+
+// Emit the message and args at TRACE level to log and sinks
+func (i *interceptLogger) Trace(msg string, args ...interface{}) {
+	i.log(Trace, msg, args...)
+}
+
+// Emit the message and args at DEBUG level to log and sinks
+func (i *interceptLogger) Debug(msg string, args ...interface{}) {
+	i.log(Debug, msg, args...)
+}
+
+// Emit the message and args at INFO level to log and sinks
+func (i *interceptLogger) Info(msg string, args ...interface{}) {
+	i.log(Info, msg, args...)
+}
+
+// Emit the message and args at WARN level to log and sinks
+func (i *interceptLogger) Warn(msg string, args ...interface{}) {
+	i.log(Warn, msg, args...)
+}
+
+// Emit the message and args at ERROR level to log and sinks
+func (i *interceptLogger) Error(msg string, args ...interface{}) {
+	i.log(Error, msg, args...)
+}
+
+func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} {
+	top := i.Logger.ImpliedArgs()
+
+	cp := make([]interface{}, len(top)+len(args))
+	copy(cp, top)
+	copy(cp[len(top):], args)
+
+	return cp
+}
+
+// Create a new sub-Logger with a name descending from the current name.
+// This is used to create a subsystem specific Logger.
+// Registered sinks will subscribe to these messages as well.
+func (i *interceptLogger) Named(name string) Logger {
+	return i.NamedIntercept(name)
+}
+
+// Create a new sub-Logger with an explicit name. This ignores the current
+// name. This is used to create a standalone logger that doesn't fall
+// within the normal hierarchy. Registered sinks will subscribe
+// to these messages as well.
+func (i *interceptLogger) ResetNamed(name string) Logger {
+	return i.ResetNamedIntercept(name)
+}
+
+// Create a new sub-Logger with a name descending from the current name.
+// This is used to create a subsystem specific Logger.
+// Registered sinks will subscribe to these messages as well.
+func (i *interceptLogger) NamedIntercept(name string) InterceptLogger {
+	var sub interceptLogger
+
+	sub = *i
+	sub.Logger = i.Logger.Named(name)
+	return &sub
+}
+
+// Create a new sub-Logger with an explicit name. This ignores the current
+// name. This is used to create a standalone logger that doesn't fall
+// within the normal hierarchy. Registered sinks will subscribe
+// to these messages as well.
+func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger {
+	var sub interceptLogger
+
+	sub = *i
+	sub.Logger = i.Logger.ResetNamed(name)
+	return &sub
+}
+
+// Return a sub-Logger for which every emitted log message will contain
+// the given key/value pairs. This is used to create a context specific
+// Logger.
+func (i *interceptLogger) With(args ...interface{}) Logger {
+	var sub interceptLogger
+
+	sub = *i
+
+	sub.Logger = i.Logger.With(args...)
+
+	return &sub
+}
+
+// RegisterSink attaches a SinkAdapter to the interceptLogger's sinks.
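+//
+// A small fan-out sketch (auditFile is illustrative; NewInterceptLogger and
+// NewSinkAdapter are this package's constructors):
+//
+//	intercept := hclog.NewInterceptLogger(&hclog.LoggerOptions{Name: "app"})
+//	sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{Level: hclog.Debug, Output: auditFile})
+//	intercept.RegisterSink(sink)
+//	defer intercept.DeregisterSink(sink)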
+func (i *interceptLogger) RegisterSink(sink SinkAdapter) { + i.mu.Lock() + defer i.mu.Unlock() + + i.Sinks[sink] = struct{}{} + + atomic.AddInt32(i.sinkCount, 1) +} + +// DeregisterSink removes a SinkAdapter from interceptLoggers sinks. +func (i *interceptLogger) DeregisterSink(sink SinkAdapter) { + i.mu.Lock() + defer i.mu.Unlock() + + delete(i.Sinks, sink) + + atomic.AddInt32(i.sinkCount, -1) +} + +func (i *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger { + return i.StandardLogger(opts) +} + +func (i *interceptLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(i.StandardWriter(opts), "", 0) +} + +func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer { + return i.StandardWriter(opts) +} + +func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: i, + inferLevels: opts.InferLevels, + inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp, + forceLevel: opts.ForceLevel, + } +} + +func (i *interceptLogger) ResetOutput(opts *LoggerOptions) error { + if or, ok := i.Logger.(OutputResettable); ok { + return or.ResetOutput(opts) + } else { + return nil + } +} + +func (i *interceptLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error { + if or, ok := i.Logger.(OutputResettable); ok { + return or.ResetOutputWithFlush(opts, flushable) + } else { + return nil + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go new file mode 100644 index 0000000000..b45064acf1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -0,0 +1,918 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + +package hclog + +import ( + "bytes" + "encoding" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode" + "unicode/utf8" + + "github.com/fatih/color" +) + +// TimeFormat is the time format to use for plain (non-JSON) output. +// This is a version of RFC3339 that contains millisecond precision. +const TimeFormat = "2006-01-02T15:04:05.000Z0700" + +// TimeFormatJSON is the time format to use for JSON output. +// This is a version of RFC3339 that contains microsecond precision. +const TimeFormatJSON = "2006-01-02T15:04:05.000000Z07:00" + +// errJsonUnsupportedTypeMsg is included in log json entries, if an arg cannot be serialized to json +const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json" + +var ( + _levelToBracket = map[Level]string{ + Debug: "[DEBUG]", + Trace: "[TRACE]", + Info: "[INFO] ", + Warn: "[WARN] ", + Error: "[ERROR]", + } + + _levelToColor = map[Level]*color.Color{ + Debug: color.New(color.FgHiWhite), + Trace: color.New(color.FgHiGreen), + Info: color.New(color.FgHiBlue), + Warn: color.New(color.FgHiYellow), + Error: color.New(color.FgHiRed), + } + + faintBoldColor = color.New(color.Faint, color.Bold) + faintColor = color.New(color.Faint) + faintMultiLinePrefix = faintColor.Sprint(" | ") + faintFieldSeparator = faintColor.Sprint("=") + faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n") +) + +// Make sure that intLogger is a Logger +var _ Logger = &intLogger{} + +// intLogger is an internal logger implementation. Internal in that it is +// defined entirely by this package. 
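+//
+// It is not constructed directly; callers typically obtain one through the
+// exported constructor (a sketch):
+//
+//	logger := hclog.New(&hclog.LoggerOptions{
+//		Name:  "my-app",
+//		Level: hclog.Debug,
+//	})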
+type intLogger struct { + json bool + callerOffset int + name string + timeFormat string + timeFn TimeFunction + disableTime bool + + // This is an interface so that it's shared by any derived loggers, since + // those derived loggers share the bufio.Writer as well. + mutex Locker + writer *writer + level *int32 + + headerColor ColorOption + fieldColor ColorOption + + implied []interface{} + + exclude func(level Level, msg string, args ...interface{}) bool + + // create subloggers with their own level setting + independentLevels bool + + subloggerHook func(sub Logger) Logger +} + +// New returns a configured logger. +func New(opts *LoggerOptions) Logger { + return newLogger(opts) +} + +// NewSinkAdapter returns a SinkAdapter with configured settings +// defined by LoggerOptions +func NewSinkAdapter(opts *LoggerOptions) SinkAdapter { + l := newLogger(opts) + if l.callerOffset > 0 { + // extra frames for interceptLogger.{Warn,Info,Log,etc...}, and SinkAdapter.Accept + l.callerOffset += 2 + } + return l +} + +func newLogger(opts *LoggerOptions) *intLogger { + if opts == nil { + opts = &LoggerOptions{} + } + + output := opts.Output + if output == nil { + output = DefaultOutput + } + + level := opts.Level + if level == NoLevel { + level = DefaultLevel + } + + mutex := opts.Mutex + if mutex == nil { + mutex = new(sync.Mutex) + } + + var ( + primaryColor ColorOption = ColorOff + headerColor ColorOption = ColorOff + fieldColor ColorOption = ColorOff + ) + switch { + case opts.ColorHeaderOnly: + headerColor = opts.Color + case opts.ColorHeaderAndFields: + fieldColor = opts.Color + headerColor = opts.Color + default: + primaryColor = opts.Color + } + + l := &intLogger{ + json: opts.JSONFormat, + name: opts.Name, + timeFormat: TimeFormat, + timeFn: time.Now, + disableTime: opts.DisableTime, + mutex: mutex, + writer: newWriter(output, primaryColor), + level: new(int32), + exclude: opts.Exclude, + independentLevels: opts.IndependentLevels, + headerColor: headerColor, + fieldColor: fieldColor, + subloggerHook: opts.SubloggerHook, + } + if opts.IncludeLocation { + l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset + } + + if l.json { + l.timeFormat = TimeFormatJSON + } + if opts.TimeFn != nil { + l.timeFn = opts.TimeFn + } + if opts.TimeFormat != "" { + l.timeFormat = opts.TimeFormat + } + + if l.subloggerHook == nil { + l.subloggerHook = identityHook + } + + l.setColorization(opts) + + atomic.StoreInt32(l.level, int32(level)) + + return l +} + +func identityHook(logger Logger) Logger { + return logger +} + +// offsetIntLogger is the stack frame offset in the call stack for the caller to +// one of the Warn, Info, Log, etc methods. +const offsetIntLogger = 3 + +// Log a message and a set of key/value pairs if the given level is at +// or more severe that the threshold configured in the Logger. +func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) { + if level < Level(atomic.LoadInt32(l.level)) { + return + } + + t := l.timeFn() + + l.mutex.Lock() + defer l.mutex.Unlock() + + if l.exclude != nil && l.exclude(level, msg, args...) { + return + } + + if l.json { + l.logJSON(t, name, level, msg, args...) + } else { + l.logPlain(t, name, level, msg, args...) + } + + l.writer.Flush(level) +} + +// Cleanup a path by returning the last 2 segments of the path only. +func trimCallerPath(path string) string { + // lovely borrowed from zap + // nb. 
To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + + // Find the last separator. + idx := strings.LastIndexByte(path, '/') + if idx == -1 { + return path + } + + // Find the penultimate separator. + idx = strings.LastIndexByte(path[:idx], '/') + if idx == -1 { + return path + } + + return path[idx+1:] +} + +// isNormal indicates if the rune is one allowed to exist as an unquoted +// string value. This is a subset of ASCII, `-` through `~`. +func isNormal(r rune) bool { + return 0x2D <= r && r <= 0x7E // - through ~ +} + +// needsQuoting returns false if all the runes in string are normal, according +// to isNormal +func needsQuoting(str string) bool { + for _, r := range str { + if !isNormal(r) { + return true + } + } + + return false +} + +// logPlain is the non-JSON logging format function which writes directly +// to the underlying writer the logger was initialized with. +// +// If the logger was initialized with a color function, it also handles +// applying the color to the log message. +// +// Color Options +// 1. No color. +// 2. Color the whole log line, based on the level. +// 3. Color only the header (level) part of the log line. +// 4. Color both the header and fields of the log line. +func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { + + if !l.disableTime { + l.writer.WriteString(t.Format(l.timeFormat)) + l.writer.WriteByte(' ') + } + + s, ok := _levelToBracket[level] + if ok { + if l.headerColor != ColorOff { + color := _levelToColor[level] + color.Fprint(l.writer, s) + } else { + l.writer.WriteString(s) + } + } else { + l.writer.WriteString("[?????]") + } + + if l.callerOffset > 0 { + if _, file, line, ok := runtime.Caller(l.callerOffset); ok { + l.writer.WriteByte(' ') + l.writer.WriteString(trimCallerPath(file)) + l.writer.WriteByte(':') + l.writer.WriteString(strconv.Itoa(line)) + l.writer.WriteByte(':') + } + } + + l.writer.WriteByte(' ') + + if name != "" { + l.writer.WriteString(name) + if msg != "" { + l.writer.WriteString(": ") + l.writer.WriteString(msg) + } + } else if msg != "" { + l.writer.WriteString(msg) + } + + args = append(l.implied, args...) + + var stacktrace CapturedStacktrace + + if len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + stacktrace = cs + } else { + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) + } + } + + l.writer.WriteByte(':') + + // Handle the field arguments, which come in pairs (key=val). + FOR: + for i := 0; i < len(args); i = i + 2 { + var ( + key string + val string + raw bool + ) + + // Convert the field value to a string. 
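+			// (Illustrative: the typed helper values below render specially;
+			// e.g. the pair ("addr", Hex(255)) renders as addr=0xff, and a
+			// Quote value is always emitted via strconv.Quote.)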
+ switch st := args[i+1].(type) { + case string: + val = st + if st == "" { + val = `""` + raw = true + } + case int: + val = strconv.FormatInt(int64(st), 10) + case int64: + val = strconv.FormatInt(int64(st), 10) + case int32: + val = strconv.FormatInt(int64(st), 10) + case int16: + val = strconv.FormatInt(int64(st), 10) + case int8: + val = strconv.FormatInt(int64(st), 10) + case uint: + val = strconv.FormatUint(uint64(st), 10) + case uint64: + val = strconv.FormatUint(uint64(st), 10) + case uint32: + val = strconv.FormatUint(uint64(st), 10) + case uint16: + val = strconv.FormatUint(uint64(st), 10) + case uint8: + val = strconv.FormatUint(uint64(st), 10) + case Hex: + val = "0x" + strconv.FormatUint(uint64(st), 16) + case Octal: + val = "0" + strconv.FormatUint(uint64(st), 8) + case Binary: + val = "0b" + strconv.FormatUint(uint64(st), 2) + case CapturedStacktrace: + stacktrace = st + continue FOR + case Format: + val = fmt.Sprintf(st[0].(string), st[1:]...) + case Quote: + raw = true + val = strconv.Quote(string(st)) + default: + v := reflect.ValueOf(st) + if v.Kind() == reflect.Slice { + val = l.renderSlice(v) + raw = true + } else { + val = fmt.Sprintf("%v", st) + } + } + + // Convert the field key to a string. + switch st := args[i].(type) { + case string: + key = st + default: + key = fmt.Sprintf("%s", st) + } + + // Optionally apply the ANSI "faint" and "bold" + // SGR values to the key. + if l.fieldColor != ColorOff { + key = faintBoldColor.Sprint(key) + } + + // Values may contain multiple lines, and that format + // is preserved, with each line prefixed with a " | " + // to show it's part of a collection of lines. + // + // Values may also need quoting, if not all the runes + // in the value string are "normal", like if they + // contain ANSI escape sequences. 
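+			// (Illustrative: a value of "hello world" is emitted as
+			// key="hello world" because of the space, while a value containing
+			// a newline takes the " | " multi-line form handled first below.)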
+ if strings.Contains(val, "\n") { + l.writer.WriteString("\n ") + l.writer.WriteString(key) + if l.fieldColor != ColorOff { + l.writer.WriteString(faintFieldSeparatorWithNewLine) + writeIndent(l.writer, val, faintMultiLinePrefix) + } else { + l.writer.WriteString("=\n") + writeIndent(l.writer, val, " | ") + } + l.writer.WriteString(" ") + } else if !raw && needsQuoting(val) { + l.writer.WriteByte(' ') + l.writer.WriteString(key) + if l.fieldColor != ColorOff { + l.writer.WriteString(faintFieldSeparator) + } else { + l.writer.WriteByte('=') + } + l.writer.WriteByte('"') + writeEscapedForOutput(l.writer, val, true) + l.writer.WriteByte('"') + } else { + l.writer.WriteByte(' ') + l.writer.WriteString(key) + if l.fieldColor != ColorOff { + l.writer.WriteString(faintFieldSeparator) + } else { + l.writer.WriteByte('=') + } + l.writer.WriteString(val) + } + } + } + + l.writer.WriteString("\n") + + if stacktrace != "" { + l.writer.WriteString(string(stacktrace)) + l.writer.WriteString("\n") + } +} + +func writeIndent(w *writer, str string, indent string) { + for { + nl := strings.IndexByte(str, "\n"[0]) + if nl == -1 { + if str != "" { + w.WriteString(indent) + writeEscapedForOutput(w, str, false) + w.WriteString("\n") + } + return + } + + w.WriteString(indent) + writeEscapedForOutput(w, str[:nl], false) + w.WriteString("\n") + str = str[nl+1:] + } +} + +func needsEscaping(str string) bool { + for _, b := range str { + if !unicode.IsPrint(b) || b == '"' { + return true + } + } + + return false +} + +const ( + lowerhex = "0123456789abcdef" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +func writeEscapedForOutput(w io.Writer, str string, escapeQuotes bool) { + if !needsEscaping(str) { + w.Write([]byte(str)) + return + } + + bb := bufPool.Get().(*bytes.Buffer) + bb.Reset() + + defer bufPool.Put(bb) + + for _, r := range str { + if escapeQuotes && r == '"' { + bb.WriteString(`\"`) + } else if unicode.IsPrint(r) { + bb.WriteRune(r) + } else { + switch r { + case '\a': + bb.WriteString(`\a`) + case '\b': + bb.WriteString(`\b`) + case '\f': + bb.WriteString(`\f`) + case '\n': + bb.WriteString(`\n`) + case '\r': + bb.WriteString(`\r`) + case '\t': + bb.WriteString(`\t`) + case '\v': + bb.WriteString(`\v`) + default: + switch { + case r < ' ': + bb.WriteString(`\x`) + bb.WriteByte(lowerhex[byte(r)>>4]) + bb.WriteByte(lowerhex[byte(r)&0xF]) + case !utf8.ValidRune(r): + r = 0xFFFD + fallthrough + case r < 0x10000: + bb.WriteString(`\u`) + for s := 12; s >= 0; s -= 4 { + bb.WriteByte(lowerhex[r>>uint(s)&0xF]) + } + default: + bb.WriteString(`\U`) + for s := 28; s >= 0; s -= 4 { + bb.WriteByte(lowerhex[r>>uint(s)&0xF]) + } + } + } + } + } + + w.Write(bb.Bytes()) +} + +func (l *intLogger) renderSlice(v reflect.Value) string { + var buf bytes.Buffer + + buf.WriteRune('[') + + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + + sv := v.Index(i) + + var val string + + switch sv.Kind() { + case reflect.String: + val = strconv.Quote(sv.String()) + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + val = strconv.FormatInt(sv.Int(), 10) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val = strconv.FormatUint(sv.Uint(), 10) + default: + val = fmt.Sprintf("%v", sv.Interface()) + if strings.ContainsAny(val, " \t\n\r") { + val = strconv.Quote(val) + } + } + + buf.WriteString(val) + } + + buf.WriteRune(']') + + return buf.String() +} + +// JSON logging function +func (l *intLogger) logJSON(t time.Time, name 
string, level Level, msg string, args ...interface{}) { + vals := l.jsonMapEntry(t, name, level, msg) + args = append(l.implied, args...) + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + vals["stacktrace"] = cs + } else { + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) + } + } + + for i := 0; i < len(args); i = i + 2 { + val := args[i+1] + switch sv := val.(type) { + case error: + // Check if val is of type error. If error type doesn't + // implement json.Marshaler or encoding.TextMarshaler + // then set val to err.Error() so that it gets marshaled + switch sv.(type) { + case json.Marshaler, encoding.TextMarshaler: + default: + val = sv.Error() + } + case Format: + val = fmt.Sprintf(sv[0].(string), sv[1:]...) + } + + var key string + + switch st := args[i].(type) { + case string: + key = st + default: + key = fmt.Sprintf("%s", st) + } + vals[key] = val + } + } + + err := json.NewEncoder(l.writer).Encode(vals) + if err != nil { + if _, ok := err.(*json.UnsupportedTypeError); ok { + plainVal := l.jsonMapEntry(t, name, level, msg) + plainVal["@warn"] = errJsonUnsupportedTypeMsg + + json.NewEncoder(l.writer).Encode(plainVal) + } + } +} + +func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} { + vals := map[string]interface{}{ + "@message": msg, + } + if !l.disableTime { + vals["@timestamp"] = t.Format(l.timeFormat) + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if name != "" { + vals["@module"] = name + } + + if l.callerOffset > 0 { + if _, file, line, ok := runtime.Caller(l.callerOffset + 1); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + return vals +} + +// Emit the message and args at the provided level +func (l *intLogger) Log(level Level, msg string, args ...interface{}) { + l.log(l.Name(), level, msg, args...) +} + +// Emit the message and args at DEBUG level +func (l *intLogger) Debug(msg string, args ...interface{}) { + l.log(l.Name(), Debug, msg, args...) +} + +// Emit the message and args at TRACE level +func (l *intLogger) Trace(msg string, args ...interface{}) { + l.log(l.Name(), Trace, msg, args...) +} + +// Emit the message and args at INFO level +func (l *intLogger) Info(msg string, args ...interface{}) { + l.log(l.Name(), Info, msg, args...) +} + +// Emit the message and args at WARN level +func (l *intLogger) Warn(msg string, args ...interface{}) { + l.log(l.Name(), Warn, msg, args...) +} + +// Emit the message and args at ERROR level +func (l *intLogger) Error(msg string, args ...interface{}) { + l.log(l.Name(), Error, msg, args...) 
+} + +// Indicate that the logger would emit TRACE level logs +func (l *intLogger) IsTrace() bool { + return Level(atomic.LoadInt32(l.level)) == Trace +} + +// Indicate that the logger would emit DEBUG level logs +func (l *intLogger) IsDebug() bool { + return Level(atomic.LoadInt32(l.level)) <= Debug +} + +// Indicate that the logger would emit INFO level logs +func (l *intLogger) IsInfo() bool { + return Level(atomic.LoadInt32(l.level)) <= Info +} + +// Indicate that the logger would emit WARN level logs +func (l *intLogger) IsWarn() bool { + return Level(atomic.LoadInt32(l.level)) <= Warn +} + +// Indicate that the logger would emit ERROR level logs +func (l *intLogger) IsError() bool { + return Level(atomic.LoadInt32(l.level)) <= Error +} + +const MissingKey = "EXTRA_VALUE_AT_END" + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. +func (l *intLogger) With(args ...interface{}) Logger { + var extra interface{} + + if len(args)%2 != 0 { + extra = args[len(args)-1] + args = args[:len(args)-1] + } + + sl := l.copy() + + result := make(map[string]interface{}, len(l.implied)+len(args)) + keys := make([]string, 0, len(l.implied)+len(args)) + + // Read existing args, store map and key for consistent sorting + for i := 0; i < len(l.implied); i += 2 { + key := l.implied[i].(string) + keys = append(keys, key) + result[key] = l.implied[i+1] + } + // Read new args, store map and key for consistent sorting + for i := 0; i < len(args); i += 2 { + key := args[i].(string) + _, exists := result[key] + if !exists { + keys = append(keys, key) + } + result[key] = args[i+1] + } + + // Sort keys to be consistent + sort.Strings(keys) + + sl.implied = make([]interface{}, 0, len(l.implied)+len(args)) + for _, k := range keys { + sl.implied = append(sl.implied, k) + sl.implied = append(sl.implied, result[k]) + } + + if extra != nil { + sl.implied = append(sl.implied, MissingKey, extra) + } + + return l.subloggerHook(sl) +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +func (l *intLogger) Named(name string) Logger { + sl := l.copy() + + if sl.name != "" { + sl.name = sl.name + "." + name + } else { + sl.name = name + } + + return l.subloggerHook(sl) +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. +func (l *intLogger) ResetNamed(name string) Logger { + sl := l.copy() + + sl.name = name + + return l.subloggerHook(sl) +} + +func (l *intLogger) ResetOutput(opts *LoggerOptions) error { + if opts.Output == nil { + return errors.New("given output is nil") + } + + l.mutex.Lock() + defer l.mutex.Unlock() + + return l.resetOutput(opts) +} + +func (l *intLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error { + if opts.Output == nil { + return errors.New("given output is nil") + } + if flushable == nil { + return errors.New("flushable is nil") + } + + l.mutex.Lock() + defer l.mutex.Unlock() + + if err := flushable.Flush(); err != nil { + return err + } + + return l.resetOutput(opts) +} + +func (l *intLogger) resetOutput(opts *LoggerOptions) error { + l.writer = newWriter(opts.Output, opts.Color) + l.setColorization(opts) + return nil +} + +// Update the logging level on-the-fly. This will affect all subloggers as +// well. 
+func (l *intLogger) SetLevel(level Level) { + atomic.StoreInt32(l.level, int32(level)) +} + +// Returns the current level +func (l *intLogger) GetLevel() Level { + return Level(atomic.LoadInt32(l.level)) +} + +// Create a *log.Logger that will send it's data through this Logger. This +// allows packages that expect to be using the standard library log to actually +// use this logger. +func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(l.StandardWriter(opts), "", 0) +} + +func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + newLog := *l + if l.callerOffset > 0 { + // the stack is + // logger.printf() -> l.Output() ->l.out.writer(hclog:stdlogAdaptor.write) -> hclog:stdlogAdaptor.dispatch() + // So plus 4. + newLog.callerOffset = l.callerOffset + 4 + } + return &stdlogAdapter{ + log: &newLog, + inferLevels: opts.InferLevels, + inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp, + forceLevel: opts.ForceLevel, + } +} + +// Accept implements the SinkAdapter interface +func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) { + i.log(name, level, msg, args...) +} + +// ImpliedArgs returns the loggers implied args +func (i *intLogger) ImpliedArgs() []interface{} { + return i.implied +} + +// Name returns the loggers name +func (i *intLogger) Name() string { + return i.name +} + +// copy returns a shallow copy of the intLogger, replacing the level pointer +// when necessary +func (l *intLogger) copy() *intLogger { + sl := *l + + if l.independentLevels { + sl.level = new(int32) + *sl.level = *l.level + } + + return &sl +} diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go new file mode 100644 index 0000000000..947ac0c9af --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -0,0 +1,393 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + +package hclog + +import ( + "io" + "log" + "os" + "strings" + "time" +) + +var ( + // DefaultOutput is used as the default log output. + DefaultOutput io.Writer = os.Stderr + + // DefaultLevel is used as the default log level. + DefaultLevel = Info +) + +// Level represents a log level. +type Level int32 + +const ( + // NoLevel is a special level used to indicate that no level has been + // set and allow for a default to be used. + NoLevel Level = 0 + + // Trace is the most verbose level. Intended to be used for the tracing + // of actions in code, such as function enters/exits, etc. + Trace Level = 1 + + // Debug information for programmer low-level analysis. + Debug Level = 2 + + // Info information about steady state operations. + Info Level = 3 + + // Warn information about rare but handled events. + Warn Level = 4 + + // Error information about unrecoverable events. + Error Level = 5 + + // Off disables all logging output. + Off Level = 6 +) + +// Format is a simple convenience type for when formatting is required. When +// processing a value of this type, the logger automatically treats the first +// argument as a Printf formatting string and passes the rest as the values +// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}). +type Format []interface{} + +// Fmt returns a Format type. This is a convenience function for creating a Format +// type. +func Fmt(str string, args ...interface{}) Format { + return append(Format{str}, args...) 
+} + +// A simple shortcut to format numbers in hex when displayed with the normal +// text output. For example: L.Info("header value", Hex(17)) +type Hex int + +// A simple shortcut to format numbers in octal when displayed with the normal +// text output. For example: L.Info("perms", Octal(17)) +type Octal int + +// A simple shortcut to format numbers in binary when displayed with the normal +// text output. For example: L.Info("bits", Binary(17)) +type Binary int + +// A simple shortcut to format strings with Go quoting. Control and +// non-printable characters will be escaped with their backslash equivalents in +// output. Intended for untrusted or multiline strings which should be logged +// as concisely as possible. +type Quote string + +// ColorOption expresses how the output should be colored, if at all. +type ColorOption uint8 + +const ( + // ColorOff is the default coloration, and does not + // inject color codes into the io.Writer. + ColorOff ColorOption = iota + // AutoColor checks if the io.Writer is a tty, + // and if so enables coloring. + AutoColor + // ForceColor will enable coloring, regardless of whether + // the io.Writer is a tty or not. + ForceColor +) + +// SupportsColor is an optional interface that can be implemented by the output +// value. If implemented and SupportsColor() returns true, then AutoColor will +// enable colorization. +type SupportsColor interface { + SupportsColor() bool +} + +// LevelFromString returns a Level type for the named log level, or "NoLevel" if +// the level string is invalid. This facilitates setting the log level via +// config or environment variable by name in a predictable way. +func LevelFromString(levelStr string) Level { + // We don't care about case. Accept both "INFO" and "info". + levelStr = strings.ToLower(strings.TrimSpace(levelStr)) + switch levelStr { + case "trace": + return Trace + case "debug": + return Debug + case "info": + return Info + case "warn": + return Warn + case "error": + return Error + case "off": + return Off + default: + return NoLevel + } +} + +func (l Level) String() string { + switch l { + case Trace: + return "trace" + case Debug: + return "debug" + case Info: + return "info" + case Warn: + return "warn" + case Error: + return "error" + case NoLevel: + return "none" + case Off: + return "off" + default: + return "unknown" + } +} + +// Logger describes the interface that must be implemented by all loggers. +type Logger interface { + // Args are alternating key, val pairs + // keys must be strings + // vals can be any type, but display is implementation specific + // Emit a message and key/value pairs at a provided log level + Log(level Level, msg string, args ...interface{}) + + // Emit a message and key/value pairs at the TRACE level + Trace(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the DEBUG level + Debug(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the INFO level + Info(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the WARN level + Warn(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the ERROR level + Error(msg string, args ...interface{}) + + // Indicate if TRACE logs would be emitted. This and the other Is* guards + // are used to elide expensive logging code based on the current level. + IsTrace() bool + + // Indicate if DEBUG logs would be emitted. This and the other Is* guards + IsDebug() bool + + // Indicate if INFO logs would be emitted. 
This and the other Is* guards
+	IsInfo() bool
+
+	// Indicate if WARN logs would be emitted. This and the other Is* guards
+	IsWarn() bool
+
+	// Indicate if ERROR logs would be emitted. This and the other Is* guards
+	IsError() bool
+
+	// ImpliedArgs returns With key/value pairs
+	ImpliedArgs() []interface{}
+
+	// Creates a sublogger that will always have the given key/value pairs
+	With(args ...interface{}) Logger
+
+	// Returns the Name of the logger
+	Name() string
+
+	// Create a logger that will prepend the name string on the front of all messages.
+	// If the logger already has a name, the new value will be appended to the current
+	// name. That way, a major subsystem can use this to decorate all its own logs
+	// without losing context.
+	Named(name string) Logger
+
+	// Create a logger that will prepend the name string on the front of all messages.
+	// This sets the name of the logger to the value directly, unlike Named which honors
+	// the current name as well.
+	ResetNamed(name string) Logger
+
+	// Updates the level. This should affect all related loggers as well,
+	// unless they were created with IndependentLevels. If an
+	// implementation cannot update the level on the fly, it should no-op.
+	SetLevel(level Level)
+
+	// Returns the current level
+	GetLevel() Level
+
+	// Return a value that conforms to the stdlib log.Logger interface
+	StandardLogger(opts *StandardLoggerOptions) *log.Logger
+
+	// Return a value that conforms to io.Writer, which can be passed into log.SetOutput()
+	StandardWriter(opts *StandardLoggerOptions) io.Writer
+}
+
+// StandardLoggerOptions can be used to configure a new standard logger.
+type StandardLoggerOptions struct {
+	// Indicate that some minimal parsing should be done on strings to try
+	// to detect their level and re-emit them.
+	// This supports strings like [ERROR], [ERR], [TRACE], [WARN], [INFO],
+	// and [DEBUG], which are stripped off before the message is re-emitted.
+	InferLevels bool
+
+	// Indicate that some minimal parsing should be done on strings to try
+	// to detect their level and re-emit them, while ignoring possible
+	// timestamp values at the beginning of the string.
+	// This supports strings like [ERROR], [ERR], [TRACE], [WARN], [INFO],
+	// and [DEBUG], which are stripped off before the message is re-emitted.
+	// The timestamp detection may result in false positives and incomplete
+	// string outputs.
+	InferLevelsWithTimestamp bool
+
+	// ForceLevel is used to force all output from the standard logger to be at
+	// the specified level. Similar to InferLevels, this will strip any level
+	// prefix contained in the logged string before applying the forced level.
+	// If set, this overrides InferLevels.
+	ForceLevel Level
+}
+
+type TimeFunction = func() time.Time
+
+// LoggerOptions can be used to configure a new logger.
+type LoggerOptions struct {
+	// Name of the subsystem to prefix logs with
+	Name string
+
+	// The threshold for the logger. Anything less severe is suppressed
+	Level Level
+
+	// Where to write the logs to. Defaults to os.Stderr if nil
+	Output io.Writer
+
+	// An optional Locker in case Output is shared. This can be a sync.Mutex or
+	// a NoopLocker if the caller wants control over output, e.g. for batching
+	// log lines.
+	Mutex Locker
+
+	// Control if the output should be in JSON.
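+	// When enabled, each entry is a single JSON object per line, shaped
+	// roughly like this (an illustrative sketch of the fields this package
+	// emits):
+	//
+	//	{"@level":"info","@message":"hello","@module":"my-app","@timestamp":"..."}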
+	JSONFormat bool
+
+	// Include file and line information in each log line
+	IncludeLocation bool
+
+	// AdditionalLocationOffset is the number of additional stack levels to skip
+	// when finding the file and line information for the log line
+	AdditionalLocationOffset int
+
+	// The time format to use instead of the default
+	TimeFormat string
+
+	// A function which is called to get the time object that is formatted using `TimeFormat`
+	TimeFn TimeFunction
+
+	// Control whether or not to display the time at all. This is required
+	// because setting TimeFormat to empty assumes the default format.
+	DisableTime bool
+
+	// Color the output. On Windows, colored logs are only available for io.Writers that
+	// are concretely instances of *os.File.
+	Color ColorOption
+
+	// Only color the header, not the body. This can help with readability of long messages.
+	ColorHeaderOnly bool
+
+	// Color the header and message body fields. This can help with readability
+	// of long messages with multiple fields.
+	ColorHeaderAndFields bool
+
+	// A function which is called with the log information; if it returns true,
+	// the value should not be logged.
+	// This is useful when interacting with a system whose log messages you
+	// wish to suppress (because they are too noisy, etc.).
+	Exclude func(level Level, msg string, args ...interface{}) bool
+
+	// IndependentLevels causes subloggers to be created with an independent
+	// copy of this logger's level. This means that using SetLevel on this
+	// logger will not affect any subloggers, and SetLevel on any subloggers
+	// will not affect the parent or sibling loggers.
+	IndependentLevels bool
+
+	// SubloggerHook registers a function that is called when a sublogger is
+	// created via Named, With, or ResetNamed. If defined, the function is
+	// passed the newly created Logger, and the Logger it returns is what the
+	// original method returns. This option allows customization via
+	// interception and wrapping of Logger instances.
+	SubloggerHook func(sub Logger) Logger
+}
+
+// InterceptLogger describes the interface for using a logger
+// that can register different output sinks.
+// This is useful for sending lower level log messages
+// to a different output while keeping the root logger
+// at a higher one.
+type InterceptLogger interface {
+	// Logger is the root logger for an InterceptLogger
+	Logger
+
+	// RegisterSink adds a SinkAdapter to the InterceptLogger
+	RegisterSink(sink SinkAdapter)
+
+	// DeregisterSink removes a SinkAdapter from the InterceptLogger
+	DeregisterSink(sink SinkAdapter)
+
+	// Create an InterceptLogger that will prepend the name string on the front of all messages.
+	// If the logger already has a name, the new value will be appended to the current
+	// name. That way, a major subsystem can use this to decorate all its own logs
+	// without losing context.
+	NamedIntercept(name string) InterceptLogger
+
+	// Create an InterceptLogger that will prepend the name string on the front of all messages.
+	// This sets the name of the logger to the value directly, unlike Named which honors
+	// the current name as well.
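+	// (An illustrative sketch: if the root logger is named "app",
+	// NamedIntercept("db") logs as "app.db", while ResetNamedIntercept("db")
+	// logs as just "db".)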
+ ResetNamedIntercept(name string) InterceptLogger + + // Deprecated: use StandardLogger + StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger + + // Deprecated: use StandardWriter + StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer +} + +// SinkAdapter describes the interface that must be implemented +// in order to Register a new sink to an InterceptLogger +type SinkAdapter interface { + Accept(name string, level Level, msg string, args ...interface{}) +} + +// Flushable represents a method for flushing an output buffer. It can be used +// if Resetting the log to use a new output, in order to flush the writes to +// the existing output beforehand. +type Flushable interface { + Flush() error +} + +// OutputResettable provides ways to swap the output in use at runtime +type OutputResettable interface { + // ResetOutput swaps the current output writer with the one given in the + // opts. Color options given in opts will be used for the new output. + ResetOutput(opts *LoggerOptions) error + + // ResetOutputWithFlush swaps the current output writer with the one given + // in the opts, first calling Flush on the given Flushable. Color options + // given in opts will be used for the new output. + ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error +} + +// Locker is used for locking output. If not set when creating a logger, a +// sync.Mutex will be used internally. +type Locker interface { + // Lock is called when the output is going to be changed or written to + Lock() + + // Unlock is called when the operation that called Lock() completes + Unlock() +} + +// NoopLocker implements locker but does nothing. This is useful if the client +// wants tight control over locking, in order to provide grouping of log +// entries or other functionality. +type NoopLocker struct{} + +// Lock does nothing +func (n NoopLocker) Lock() {} + +// Unlock does nothing +func (n NoopLocker) Unlock() {} + +var _ Locker = (*NoopLocker)(nil) diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go new file mode 100644 index 0000000000..d43da809eb --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + +package hclog + +import ( + "io" + "io/ioutil" + "log" +) + +// NewNullLogger instantiates a Logger for which all calls +// will succeed without doing anything. +// Useful for testing purposes. 
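+//
+// A typical test sketch (NewService is a hypothetical constructor that
+// accepts any hclog.Logger):
+//
+//	logger := hclog.NewNullLogger()
+//	svc := NewService(logger) // all of svc's logging is silently discarded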
+func NewNullLogger() Logger { + return &nullLogger{} +} + +type nullLogger struct{} + +func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {} + +func (l *nullLogger) Trace(msg string, args ...interface{}) {} + +func (l *nullLogger) Debug(msg string, args ...interface{}) {} + +func (l *nullLogger) Info(msg string, args ...interface{}) {} + +func (l *nullLogger) Warn(msg string, args ...interface{}) {} + +func (l *nullLogger) Error(msg string, args ...interface{}) {} + +func (l *nullLogger) IsTrace() bool { return false } + +func (l *nullLogger) IsDebug() bool { return false } + +func (l *nullLogger) IsInfo() bool { return false } + +func (l *nullLogger) IsWarn() bool { return false } + +func (l *nullLogger) IsError() bool { return false } + +func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} } + +func (l *nullLogger) With(args ...interface{}) Logger { return l } + +func (l *nullLogger) Name() string { return "" } + +func (l *nullLogger) Named(name string) Logger { return l } + +func (l *nullLogger) ResetNamed(name string) Logger { return l } + +func (l *nullLogger) SetLevel(level Level) {} + +func (l *nullLogger) GetLevel() Level { return NoLevel } + +func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + return log.New(l.StandardWriter(opts), "", log.LstdFlags) +} + +func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return ioutil.Discard +} diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go new file mode 100644 index 0000000000..9b27bd3d3d --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go @@ -0,0 +1,109 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package hclog + +import ( + "bytes" + "runtime" + "strconv" + "strings" + "sync" +) + +var ( + _stacktraceIgnorePrefixes = []string{ + "runtime.goexit", + "runtime.main", + } + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } +) + +// CapturedStacktrace represents a stacktrace captured by a previous call +// to log.Stacktrace. If passed to a logging function, the stacktrace +// will be appended. +type CapturedStacktrace string + +// Stacktrace captures a stacktrace of the current goroutine and returns +// it to be passed to a logging function. 
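+//
+// For example (an illustrative sketch; logger and err come from the caller):
+//
+//	logger.Error("request failed", "err", err, "stacktrace", hclog.Stacktrace())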
+func Stacktrace() CapturedStacktrace { + return CapturedStacktrace(takeStacktrace()) +} + +func takeStacktrace() string { + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + var buffer bytes.Buffer + + for { + // Skip the call to runtime.Counters and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + n := runtime.Callers(2, programCounters.pcs) + if n < cap(programCounters.pcs) { + programCounters.pcs = programCounters.pcs[:n] + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. + programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + frames := runtime.CallersFrames(programCounters.pcs) + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if shouldIgnoreStacktraceFunction(frame.Function) { + continue + } + if i != 0 { + buffer.WriteByte('\n') + } + i++ + buffer.WriteString(frame.Function) + buffer.WriteByte('\n') + buffer.WriteByte('\t') + buffer.WriteString(frame.File) + buffer.WriteByte(':') + buffer.WriteString(strconv.Itoa(int(frame.Line))) + } + + return buffer.String() +} + +func shouldIgnoreStacktraceFunction(function string) bool { + for _, prefix := range _stacktraceIgnorePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go new file mode 100644 index 0000000000..03739b61fa --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -0,0 +1,113 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + +package hclog + +import ( + "bytes" + "log" + "regexp" + "strings" +) + +// Regex to ignore characters commonly found in timestamp formats from the +// beginning of inputs. +var logTimestampRegexp = regexp.MustCompile(`^[\d\s\:\/\.\+-TZ]*`) + +// Provides a io.Writer to shim the data out of *log.Logger +// and back into our Logger. This is basically the only way to +// build upon *log.Logger. +type stdlogAdapter struct { + log Logger + inferLevels bool + inferLevelsWithTimestamp bool + forceLevel Level +} + +// Take the data, infer the levels if configured, and send it through +// a regular Logger. +func (s *stdlogAdapter) Write(data []byte) (int, error) { + str := string(bytes.TrimRight(data, " \t\n")) + + if s.forceLevel != NoLevel { + // Use pickLevel to strip log levels included in the line since we are + // forcing the level + _, str := s.pickLevel(str) + + // Log at the forced level + s.dispatch(str, s.forceLevel) + } else if s.inferLevels { + if s.inferLevelsWithTimestamp { + str = s.trimTimestamp(str) + } + + level, str := s.pickLevel(str) + s.dispatch(str, level) + } else { + s.log.Info(str) + } + + return len(data), nil +} + +func (s *stdlogAdapter) dispatch(str string, level Level) { + switch level { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } +} + +// Detect, based on conventions, what log level this is. 
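+// For example (illustrative): an input line of "[WARN] disk almost full" is
+// re-emitted at Warn as "disk almost full", while a line with no recognized
+// prefix falls through to Info unchanged.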
+func (s *stdlogAdapter) pickLevel(str string) (Level, string) { + switch { + case strings.HasPrefix(str, "[DEBUG]"): + return Debug, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[TRACE]"): + return Trace, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[INFO]"): + return Info, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[WARN]"): + return Warn, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[ERROR]"): + return Error, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERR]"): + return Error, strings.TrimSpace(str[5:]) + default: + return Info, str + } +} + +func (s *stdlogAdapter) trimTimestamp(str string) string { + idx := logTimestampRegexp.FindStringIndex(str) + return str[idx[1]:] +} + +type logWriter struct { + l *log.Logger +} + +func (l *logWriter) Write(b []byte) (int, error) { + l.l.Println(string(bytes.TrimRight(b, " \n\t"))) + return len(b), nil +} + +// Takes a standard library logger and returns a Logger that will write to it +func FromStandardLogger(l *log.Logger, opts *LoggerOptions) Logger { + var dl LoggerOptions = *opts + + // Use the time format that log.Logger uses + dl.DisableTime = true + dl.Output = &logWriter{l} + + return New(&dl) +} diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go new file mode 100644 index 0000000000..4ee219bf0c --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -0,0 +1,85 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + +package hclog + +import ( + "bytes" + "io" +) + +type writer struct { + b bytes.Buffer + w io.Writer + color ColorOption +} + +func newWriter(w io.Writer, color ColorOption) *writer { + return &writer{w: w, color: color} +} + +func (w *writer) Flush(level Level) (err error) { + var unwritten = w.b.Bytes() + + if w.color != ColorOff { + color := _levelToColor[level] + unwritten = []byte(color.Sprintf("%s", unwritten)) + } + + if lw, ok := w.w.(LevelWriter); ok { + _, err = lw.LevelWrite(level, unwritten) + } else { + _, err = w.w.Write(unwritten) + } + w.b.Reset() + return err +} + +func (w *writer) Write(p []byte) (int, error) { + return w.b.Write(p) +} + +func (w *writer) WriteByte(c byte) error { + return w.b.WriteByte(c) +} + +func (w *writer) WriteString(s string) (int, error) { + return w.b.WriteString(s) +} + +// LevelWriter is the interface that wraps the LevelWrite method. +type LevelWriter interface { + LevelWrite(level Level, p []byte) (n int, err error) +} + +// LeveledWriter writes all log messages to the standard writer, +// except for log levels that are defined in the overrides map. +type LeveledWriter struct { + standard io.Writer + overrides map[Level]io.Writer +} + +// NewLeveledWriter returns an initialized LeveledWriter. +// +// standard will be used as the default writer for all log levels, +// except for log levels that are defined in the overrides map. +func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter { + return &LeveledWriter{ + standard: standard, + overrides: overrides, + } +} + +// Write implements io.Writer. +func (lw *LeveledWriter) Write(p []byte) (int, error) { + return lw.standard.Write(p) +} + +// LevelWrite implements LevelWriter. 
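+//
+// A construction sketch that routes errors to stderr and everything else
+// to stdout (assumes os and io are imported by the caller):
+//
+//	w := hclog.NewLeveledWriter(os.Stdout, map[hclog.Level]io.Writer{
+//		hclog.Error: os.Stderr,
+//	})
+//	logger := hclog.New(&hclog.LoggerOptions{Output: w})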
+func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) { + w, ok := lw.overrides[level] + if !ok { + w = lw.standard + } + return w.Write(p) +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md new file mode 100644 index 0000000000..86c6d03fba --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md @@ -0,0 +1,23 @@ +# UNRELEASED + +# 1.3.0 (September 17th, 2020) + +FEATURES + +* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)] + +# 1.2.0 (March 18th, 2020) + +FEATURES + +* Adds a `Clone` method to `Txn` allowing transactions to be split either into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)] + +# 1.1.0 (May 22nd, 2019) + +FEATURES + +* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)] + +# 1.0.0 (August 30th, 2018) + +* go mod adopted diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. 
any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md new file mode 100644 index 0000000000..aca15a6421 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md @@ -0,0 +1,66 @@ +go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master) +========= + +Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +A tree supports using a transaction to batch multiple updates (insert, delete) +in a more efficient manner than performing each operation one at a time. + +For a mutable variant, see [go-radix](https://github.com/armon/go-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix). + +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := iradix.New() +r, _, _ = r.Insert([]byte("foo"), 1) +r, _, _ = r.Insert([]byte("bar"), 2) +r, _, _ = r.Insert([]byte("foobar"), 2) + +// Find the longest prefix match +m, _, _ := r.Root().LongestPrefix([]byte("foozip")) +if string(m) != "foo" { + panic("should be foo") +} +``` + +Here is an example of performing a range scan of the keys. 
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("001"), 1)
+r, _, _ = r.Insert([]byte("002"), 2)
+r, _, _ = r.Insert([]byte("005"), 5)
+r, _, _ = r.Insert([]byte("010"), 10)
+r, _, _ = r.Insert([]byte("100"), 10)
+
+// Range scan over the keys that sort lexicographically between [003, 050)
+it := r.Root().Iterator()
+it.SeekLowerBound([]byte("003"))
+for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
+	if string(key) >= "050" {
+		break
+	}
+	fmt.Println(string(key))
+}
+// Output:
+// 005
+// 010
+```
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
new file mode 100644
index 0000000000..a63674775f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
@@ -0,0 +1,21 @@
+package iradix
+
+import "sort"
+
+type edges []edge
+
+func (e edges) Len() int {
+	return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+	return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+	sort.Sort(e)
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
new file mode 100644
index 0000000000..168bda76df
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
@@ -0,0 +1,676 @@
+package iradix
+
+import (
+	"bytes"
+	"strings"
+
+	"github.com/hashicorp/golang-lru/simplelru"
+)
+
+const (
+	// defaultModifiedCache is the default size of the modified node
+	// cache used per transaction. This is used to cache the updates
+	// to the nodes near the root, while the leaves do not need to be
+	// cached. This is important for very large transactions to prevent
+	// the modified cache from growing to be enormous. This is also used
+	// to set the max size of the mutation notify maps since those should
+	// also be bounded in a similar way.
+	defaultModifiedCache = 8192
+)
+
+// Tree implements an immutable radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over a standard
+// hash map is prefix-based lookups and ordered iteration. The immutability
+// means that it is safe to concurrently read from a Tree without any
+// coordination.
+type Tree struct {
+	root *Node
+	size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+	t := &Tree{
+		root: &Node{
+			mutateCh: make(chan struct{}),
+		},
+	}
+	return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+	return t.size
+}
+
+// Txn is a transaction on the tree. This transaction is applied
+// atomically and returns a new tree when committed. A transaction
+// is not thread safe, and should only be used by a single goroutine.
+type Txn struct {
+	// root is the modified root for the transaction.
+	root *Node
+
+	// snap is a snapshot of the root node for use if we have to run the
+	// slow notify algorithm.
+	snap *Node
+
+	// size tracks the size of the tree as it is modified during the
+	// transaction.
+	size int
+
+	// writable is a cache of writable nodes that have been created during
+	// the course of the transaction. This allows us to re-use the same
+	// nodes for further writes and avoid unnecessary copies of nodes that
+	// have never been exposed outside the transaction. This will only hold
+	// up to defaultModifiedCache number of entries.
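+	// The cache is created lazily by writeNode on the first write, and is
+	// dropped when the transaction is cloned or committed.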
+	writable *simplelru.LRU
+
+	// trackChannels is used to hold channels that need to be notified to
+	// signal mutation of the tree. This will only hold up to
+	// defaultModifiedCache number of entries, after which we will set the
+	// trackOverflow flag, which will cause us to use a more expensive
+	// algorithm to perform the notifications. Mutation tracking is only
+	// performed if trackMutate is true.
+	trackChannels map[chan struct{}]struct{}
+	trackOverflow bool
+	trackMutate   bool
+}
+
+// Txn starts a new transaction that can be used to mutate the tree
+func (t *Tree) Txn() *Txn {
+	txn := &Txn{
+		root: t.root,
+		snap: t.root,
+		size: t.size,
+	}
+	return txn
+}
+
+// Clone makes an independent copy of the transaction. The new transaction
+// does not track any nodes and has TrackMutate turned off. The cloned
+// transaction will contain any uncommitted writes in the original
+// transaction, but further mutations to either will be independent and
+// result in different radix trees on Commit. A cloned transaction may be
+// passed to another goroutine and mutated there independently; however,
+// each transaction may only be mutated by a single goroutine at a time.
+func (t *Txn) Clone() *Txn {
+	// reset the writable node cache to avoid leaking future writes into the clone
+	t.writable = nil
+
+	txn := &Txn{
+		root: t.root,
+		snap: t.snap,
+		size: t.size,
+	}
+	return txn
+}
+
+// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
+// then notifications will be issued for affected internal nodes and leaves when
+// the transaction is committed.
+func (t *Txn) TrackMutate(track bool) {
+	t.trackMutate = track
+}
+
+// trackChannel safely attempts to track the given mutation channel, setting the
+// overflow flag if we can no longer track any more. This limits the amount of
+// state that will accumulate during a transaction, and we have a slower
+// algorithm to switch to if we overflow.
+func (t *Txn) trackChannel(ch chan struct{}) {
+	// In overflow, make sure we don't store any more objects.
+	if t.trackOverflow {
+		return
+	}
+
+	// If this would overflow the state we reject it and set the flag (since
+	// we aren't tracking everything that's required any longer).
+	if len(t.trackChannels) >= defaultModifiedCache {
+		// Mark that we are in the overflow state
+		t.trackOverflow = true
+
+		// Clear the map so that the channels can be garbage collected. It is
+		// safe to do this since we have already overflowed and will be using
+		// the slow notify algorithm.
+		t.trackChannels = nil
+		return
+	}
+
+	// Create the map on the fly when we need it.
+	if t.trackChannels == nil {
+		t.trackChannels = make(map[chan struct{}]struct{})
+	}
+
+	// Otherwise we are good to track it.
+	t.trackChannels[ch] = struct{}{}
+}
+
+// writeNode returns a node to be modified. If the current node has already been
+// modified during the course of the transaction, it is used in-place. Set
+// forLeafUpdate to true if you are getting a write node to update the leaf,
+// which will set leaf mutation tracking appropriately as well.
+func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
+	// Ensure the writable set exists.
+	if t.writable == nil {
+		lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
+		if err != nil {
+			panic(err)
+		}
+		t.writable = lru
+	}
+
+	// If this node has already been modified, we can continue to use it
+	// during this transaction.
We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf. + if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. + nc := &Node{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } + if n.prefix != nil { + nc.prefix = make([]byte, len(n.prefix)) + copy(nc.prefix, n.prefix) + } + if len(n.edges) != 0 { + nc.edges = make([]edge, len(n.edges)) + copy(nc.edges, n.edges) + } + + // Mark this node as writable. + t.writable.Add(nc, nil) + return nc +} + +// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction +// Returns the size of the subtree visited +func (t *Txn) trackChannelsAndCount(n *Node) int { + // Count only leaf nodes + leaves := 0 + if n.leaf != nil { + leaves = 1 + } + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Recurse on the children + for _, e := range n.edges { + leaves += t.trackChannelsAndCount(e.node) + } + return leaves +} + +// mergeChild is called to collapse the given node with its child. This is only +// called when the given node is not a leaf and has a single edge. +func (t *Txn) mergeChild(n *Node) { + // Mark the child node as being mutated since we are about to abandon + // it. We don't need to mark the leaf since we are retaining it if it + // is there. + e := n.edges[0] + child := e.node + if t.trackMutate { + t.trackChannel(child.mutateCh) + } + + // Merge the nodes. 
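+	// The child's prefix is appended onto ours, and we adopt its leaf and edges.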
+	n.prefix = concat(n.prefix, child.prefix)
+	n.leaf = child.leaf
+	if len(child.edges) != 0 {
+		n.edges = make([]edge, len(child.edges))
+		copy(n.edges, child.edges)
+	} else {
+		n.edges = nil
+	}
+}
+
+// insert does a recursive insertion
+func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
+	// Handle key exhaustion
+	if len(search) == 0 {
+		var oldVal interface{}
+		didUpdate := false
+		if n.isLeaf() {
+			oldVal = n.leaf.val
+			didUpdate = true
+		}
+
+		nc := t.writeNode(n, true)
+		nc.leaf = &leafNode{
+			mutateCh: make(chan struct{}),
+			key:      k,
+			val:      v,
+		}
+		return nc, oldVal, didUpdate
+	}
+
+	// Look for the edge
+	idx, child := n.getEdge(search[0])
+
+	// No edge, create one
+	if child == nil {
+		e := edge{
+			label: search[0],
+			node: &Node{
+				mutateCh: make(chan struct{}),
+				leaf: &leafNode{
+					mutateCh: make(chan struct{}),
+					key:      k,
+					val:      v,
+				},
+				prefix: search,
+			},
+		}
+		nc := t.writeNode(n, false)
+		nc.addEdge(e)
+		return nc, nil, false
+	}
+
+	// Determine longest prefix of the search key on match
+	commonPrefix := longestPrefix(search, child.prefix)
+	if commonPrefix == len(child.prefix) {
+		search = search[commonPrefix:]
+		newChild, oldVal, didUpdate := t.insert(child, k, search, v)
+		if newChild != nil {
+			nc := t.writeNode(n, false)
+			nc.edges[idx].node = newChild
+			return nc, oldVal, didUpdate
+		}
+		return nil, oldVal, didUpdate
+	}
+
+	// Split the node
+	nc := t.writeNode(n, false)
+	splitNode := &Node{
+		mutateCh: make(chan struct{}),
+		prefix:   search[:commonPrefix],
+	}
+	nc.replaceEdge(edge{
+		label: search[0],
+		node:  splitNode,
+	})
+
+	// Restore the existing child node
+	modChild := t.writeNode(child, false)
+	splitNode.addEdge(edge{
+		label: modChild.prefix[commonPrefix],
+		node:  modChild,
+	})
+	modChild.prefix = modChild.prefix[commonPrefix:]
+
+	// Create a new leaf node
+	leaf := &leafNode{
+		mutateCh: make(chan struct{}),
+		key:      k,
+		val:      v,
+	}
+
+	// If the new key is a subset, add it to this node
+	search = search[commonPrefix:]
+	if len(search) == 0 {
+		splitNode.leaf = leaf
+		return nc, nil, false
+	}
+
+	// Create a new edge for the node
+	splitNode.addEdge(edge{
+		label: search[0],
+		node: &Node{
+			mutateCh: make(chan struct{}),
+			leaf:     leaf,
+			prefix:   search,
+		},
+	})
+	return nc, nil, false
+}
+
+// delete does a recursive deletion
+func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		if !n.isLeaf() {
+			return nil, nil
+		}
+		// Copy the pointer in case we are in a transaction that already
+		// modified this node since the node will be reused. Any changes
+		// made to the node will not affect returning the original leaf
+		// value.
+		oldLeaf := n.leaf
+
+		// Remove the leaf node
+		nc := t.writeNode(n, true)
+		nc.leaf = nil
+
+		// Check if this node should be merged
+		if n != t.root && len(nc.edges) == 1 {
+			t.mergeChild(nc)
+		}
+		return nc, oldLeaf
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	if child == nil || !bytes.HasPrefix(search, child.prefix) {
+		return nil, nil
+	}
+
+	// Consume the search prefix
+	search = search[len(child.prefix):]
+	newChild, leaf := t.delete(n, child, search)
+	if newChild == nil {
+		return nil, nil
+	}
+
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below.
+	// This is pretty subtle, so be careful if you change any of the logic
+	// here.
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, leaf
+}
+
+// deletePrefix does a recursive deletion of all keys under a prefix
+func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		nc := t.writeNode(n, true)
+		if n.isLeaf() {
+			nc.leaf = nil
+		}
+		nc.edges = nil
+		return nc, t.trackChannelsAndCount(n)
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
+	// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
+	if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
+		return nil, 0
+	}
+
+	// Consume the search prefix
+	if len(child.prefix) > len(search) {
+		search = []byte("")
+	} else {
+		search = search[len(child.prefix):]
+	}
+	newChild, numDeletions := t.deletePrefix(n, child, search)
+	if newChild == nil {
+		return nil, 0
+	}
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, numDeletions
+}
+
+// Insert is used to add or update a given key. The return provides
+// the previous value and a bool indicating if any was set.
+func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
+	newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
+	if newRoot != nil {
+		t.root = newRoot
+	}
+	if !didUpdate {
+		t.size++
+	}
+	return oldVal, didUpdate
+}
+
+// Delete is used to delete a given key. Returns the old value if any,
+// and a bool indicating if the key was set.
+func (t *Txn) Delete(k []byte) (interface{}, bool) {
+	newRoot, leaf := t.delete(nil, t.root, k)
+	if newRoot != nil {
+		t.root = newRoot
+	}
+	if leaf != nil {
+		t.size--
+		return leaf.val, true
+	}
+	return nil, false
+}
+
+// DeletePrefix is used to delete an entire subtree that matches the prefix.
+// This will delete all nodes under that prefix.
+func (t *Txn) DeletePrefix(prefix []byte) bool {
+	newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
+	if newRoot != nil {
+		t.root = newRoot
+		t.size = t.size - numDeletions
+		return true
+	}
+	return false
+}
+
+// Root returns the current root of the radix tree within this
+// transaction. The root is not safe across insert and delete operations,
+// but can be used to read the current state during a transaction.
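+// Reads through the returned root observe the transaction's uncommitted
+// writes.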
+func (t *Txn) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Txn) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. +func (t *Txn) Commit() *Tree { + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn) CommitOnly() *Tree { + nt := &Tree{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. + if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. + rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. 
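+// Each call runs a single-operation transaction and commits it; the original
+// tree is left unchanged.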
+func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Insert(k, v) + return txn.Commit(), old, ok +} + +// Delete is used to delete a given key. Returns the new tree, +// old value if any, and a bool indicating if the key was set. +func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Delete(k) + return txn.Commit(), old, ok +} + +// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, +// and a bool indicating if the prefix matched any nodes +func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) { + txn := t.Txn() + ok := txn.DeletePrefix(k) + return txn.Commit(), ok +} + +// Root returns the root node of the tree which can be used for richer +// query operations. +func (t *Tree) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 []byte) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// concat two byte slices, returning a third new copy +func concat(a, b []byte) []byte { + c := make([]byte, len(a)+len(b)) + copy(c, a) + copy(c[len(a):], b) + return c +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go new file mode 100644 index 0000000000..f17d0a644f --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go @@ -0,0 +1,205 @@ +package iradix + +import ( + "bytes" +) + +// Iterator is used to iterate over a set of nodes +// in pre-order +type Iterator struct { + node *Node + stack []edges +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + // Wipe the stack + i.stack = nil + n := i.node + watch = n.mutateCh + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + i.node = n + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + i.node = nil + return + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + i.node = n + return + } else { + i.node = nil + return + } + } +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (i *Iterator) SeekPrefix(prefix []byte) { + i.SeekPrefixWatch(prefix) +} + +func (i *Iterator) recurseMin(n *Node) *Node { + // Traverse to the minimum child + if n.leaf != nil { + return n + } + nEdges := len(n.edges) + if nEdges > 1 { + // Add all the other edges to the stack (the min node will be added as + // we recurse) + i.stack = append(i.stack, n.edges[1:]) + } + if nEdges > 0 { + return i.recurseMin(n.edges[0].node) + } + // Shouldn't be possible + return nil +} + +// SeekLowerBound is used to seek the iterator to the smallest key that is +// greater or equal to the given key. There is no watch variant as it's hard to +// predict based on the radix structure which node(s) changes might affect the +// result. 
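+// For example, on a tree holding "001", "002", "005" and "010", seeking the
+// lower bound "003" positions the iterator so that Next() yields "005" first.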
+func (i *Iterator) SeekLowerBound(key []byte) {
+	// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+	// go because we need only a subset of edges of many nodes in the path to the
+	// leaf with the lower bound. Note that the iterator will still recurse into
+	// children that we don't traverse on the way to the lower bound as it walks
+	// the stack.
+	i.stack = []edges{}
+	// i.node starts off in the common case as pointing to the root node of the
+	// tree. By the time we return we have either found a lower bound and set up
+	// the stack to traverse all larger keys, or we have not and the stack and
+	// node should both be nil to prevent the iterator from assuming it is just
+	// iterating the whole tree from the root node. Either way this needs to end
+	// up as nil so just set it here.
+	n := i.node
+	i.node = nil
+	search := key
+
+	found := func(n *Node) {
+		i.stack = append(i.stack, edges{edge{node: n}})
+	}
+
+	findMin := func(n *Node) {
+		n = i.recurseMin(n)
+		if n != nil {
+			found(n)
+			return
+		}
+	}
+
+	for {
+		// Compare current prefix with the search key's same-length prefix.
+		var prefixCmp int
+		if len(n.prefix) < len(search) {
+			prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+		} else {
+			prefixCmp = bytes.Compare(n.prefix, search)
+		}
+
+		if prefixCmp > 0 {
+			// Prefix is larger, that means the lower bound is greater than the search
+			// and from now on we need to follow the minimum path to the smallest
+			// leaf under this subtree.
+			findMin(n)
+			return
+		}
+
+		if prefixCmp < 0 {
+			// Prefix is smaller than search prefix, that means there is no lower
+			// bound
+			i.node = nil
+			return
+		}
+
+		// Prefix is equal, we are still heading for an exact match. If this is a
+		// leaf and an exact match we're done.
+		if n.leaf != nil && bytes.Equal(n.leaf.key, key) {
+			found(n)
+			return
+		}
+
+		// Consume the search prefix if the current node has one. Note that this is
+		// safe because if n.prefix is longer than the search slice prefixCmp would
+		// have been > 0 above and the method would have already returned.
+		search = search[len(n.prefix):]
+
+		if len(search) == 0 {
+			// We've exhausted the search key, but the current node is not an exact
+			// match or not a leaf. That means the leaf value, if it exists, and all
+			// child nodes must be strictly greater, so the smallest key in this
+			// subtree must be the lower bound.
+			findMin(n)
+			return
+		}
+
+		// Otherwise, take the lower bound next edge.
+		idx, lbNode := n.getLowerBoundEdge(search[0])
+		if lbNode == nil {
+			return
+		}
+
+		// Create stack edges for all the strictly higher edges in this node.
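+		// The lower bound edge itself is not pushed; it is recursed into below.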
+ if idx+1 < len(n.edges) { + i.stack = append(i.stack, n.edges[idx+1:]) + } + + // Recurse + n = lbNode + } +} + +// Next returns the next node in order +func (i *Iterator) Next() ([]byte, interface{}, bool) { + // Initialize our stack if needed + if i.stack == nil && i.node != nil { + i.stack = []edges{ + { + edge{node: i.node}, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack + n := len(i.stack) + last := i.stack[n-1] + elem := last[0].node + + // Update the stack + if len(last) > 1 { + i.stack[n-1] = last[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier + if len(elem.edges) > 0 { + i.stack = append(i.stack, elem.edges) + } + + // Return the leaf values if any + if elem.leaf != nil { + return elem.leaf.key, elem.leaf.val, true + } + } + return nil, nil, false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go new file mode 100644 index 0000000000..3598548087 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go @@ -0,0 +1,334 @@ +package iradix + +import ( + "bytes" + "sort" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn func(k []byte, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + mutateCh chan struct{} + key []byte + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *Node +} + +// Node is an immutable node in the radix tree +type Node struct { + // mutateCh is closed if this node is modified + mutateCh chan struct{} + + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix []byte + + // Edges should be stored in-order for iteration. 
+ // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *Node) isLeaf() bool { + return n.leaf != nil +} + +func (n *Node) addEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + n.edges = append(n.edges, e) + if idx != num { + copy(n.edges[idx+1:], n.edges[idx:num]) + n.edges[idx] = e + } +} + +func (n *Node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *Node) getEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) getLowerBoundEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + // we want lower bound behavior so return even if it's not an exact match + if idx < num { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + search := k + watch := n.mutateCh + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.mutateCh, n.leaf.val, true + } + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return watch, nil, false +} + +func (n *Node) Get(k []byte) (interface{}, bool) { + _, val, ok := n.GetWatch(k) + return val, ok +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
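+// For example, with "foo" and "foobar" in the tree, a lookup of "foozip"
+// returns the entry stored under "foo".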
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+	var last *leafNode
+	search := k
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+	return &Iterator{node: n}
+}
+
+// ReverseIterator is used to return an iterator at
+// the given node to walk the tree backwards
+func (n *Node) ReverseIterator() *ReverseIterator {
+	return NewReverseIterator(n)
+}
+
+// rawIterator is used to return a raw iterator at the given node to walk the
+// tree.
+func (n *Node) rawIterator() *rawIterator {
+	iter := &rawIterator{node: n}
+	iter.Next()
+	return iter
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+	recursiveWalk(n, fn)
+}
+
+// WalkBackwards is used to walk the tree in reverse order
+func (n *Node) WalkBackwards(fn WalkFn) {
+	reverseRecursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively.
Returns true if the walk should be aborted +func recursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// reverseRecursiveWalk is used to do a reverse pre-order +// walk of a node recursively. Returns true if the walk +// should be aborted +func reverseRecursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children in reverse order + for i := len(n.edges) - 1; i >= 0; i-- { + e := n.edges[i] + if reverseRecursiveWalk(e.node, fn) { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go new file mode 100644 index 0000000000..3c6a22525c --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go @@ -0,0 +1,78 @@ +package iradix + +// rawIterator visits each of the nodes in the tree, even the ones that are not +// leaves. It keeps track of the effective path (what a leaf at a given node +// would be called), which is useful for comparing trees. +type rawIterator struct { + // node is the starting node in the tree for the iterator. + node *Node + + // stack keeps track of edges in the frontier. + stack []rawStackEntry + + // pos is the current position of the iterator. + pos *Node + + // path is the effective path of the current iterator position, + // regardless of whether the current node is a leaf. + path string +} + +// rawStackEntry is used to keep track of the cumulative common path as well as +// its associated edges in the frontier. +type rawStackEntry struct { + path string + edges edges +} + +// Front returns the current node that has been iterated to. +func (i *rawIterator) Front() *Node { + return i.pos +} + +// Path returns the effective path of the current node, even if it's not actually +// a leaf. +func (i *rawIterator) Path() string { + return i.path +} + +// Next advances the iterator to the next node. +func (i *rawIterator) Next() { + // Initialize our stack if needed. + if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry{ + { + edges: edges{ + edge{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. 
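+	// The cumulative path is extended with this node's prefix so that
+	// Path() reports what a leaf at this position would be named.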
+ if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go new file mode 100644 index 0000000000..554fa7129c --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go @@ -0,0 +1,239 @@ +package iradix + +import ( + "bytes" +) + +// ReverseIterator is used to iterate over a set of nodes +// in reverse in-order +type ReverseIterator struct { + i *Iterator + + // expandedParents stores the set of parent nodes whose relevant children have + // already been pushed into the stack. This can happen during seek or during + // iteration. + // + // Unlike forward iteration we need to recurse into children before we can + // output the value stored in an internal leaf since all children are greater. + // We use this to track whether we have already ensured all the children are + // in the stack. + expandedParents map[*Node]struct{} +} + +// NewReverseIterator returns a new ReverseIterator at a node +func NewReverseIterator(n *Node) *ReverseIterator { + return &ReverseIterator{ + i: &Iterator{node: n}, + } +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + return ri.i.SeekPrefixWatch(prefix) +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (ri *ReverseIterator) SeekPrefix(prefix []byte) { + ri.i.SeekPrefixWatch(prefix) +} + +// SeekReverseLowerBound is used to seek the iterator to the largest key that is +// lower or equal to the given key. There is no watch variant as it's hard to +// predict based on the radix structure which node(s) changes might affect the +// result. +func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) { + // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we + // go because we need only a subset of edges of many nodes in the path to the + // leaf with the lower bound. Note that the iterator will still recurse into + // children that we don't traverse on the way to the reverse lower bound as it + // walks the stack. + ri.i.stack = []edges{} + // ri.i.node starts off in the common case as pointing to the root node of the + // tree. By the time we return we have either found a lower bound and setup + // the stack to traverse all larger keys, or we have not and the stack and + // node should both be nil to prevent the iterator from assuming it is just + // iterating the whole tree from the root node. Either way this needs to end + // up as nil so just set it here. + n := ri.i.node + ri.i.node = nil + search := key + + if ri.expandedParents == nil { + ri.expandedParents = make(map[*Node]struct{}) + } + + found := func(n *Node) { + ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) + // We need to mark this node as expanded in advance too otherwise the + // iterator will attempt to walk all of its children even though they are + // greater than the lower bound we have found. We've expanded it in the + // sense that all of its children that we want to walk are already in the + // stack (i.e. none of them). 
+ ri.expandedParents[n] = struct{}{} + } + + for { + // Compare current prefix with the search key's same-length prefix. + var prefixCmp int + if len(n.prefix) < len(search) { + prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) + } else { + prefixCmp = bytes.Compare(n.prefix, search) + } + + if prefixCmp < 0 { + // Prefix is smaller than search prefix, that means there is no exact + // match for the search key. But we are looking in reverse, so the reverse + // lower bound will be the largest leaf under this subtree, since it is + // the value that would come right before the current search key if it + // were in the tree. So we need to follow the maximum path in this subtree + // to find it. Note that this is exactly what the iterator will already do + // if it finds a node in the stack that has _not_ been marked as expanded + // so in this one case we don't call `found` and instead let the iterator + // do the expansion and recursion through all the children. + ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) + return + } + + if prefixCmp > 0 { + // Prefix is larger than search prefix, or there is no prefix but we've + // also exhausted the search key. Either way, that means there is no + // reverse lower bound since nothing comes before our current search + // prefix. + return + } + + // If this is a leaf, something needs to happen! Note that if it's a leaf + // and prefixCmp was zero (which it must be to get here) then the leaf value + // is either an exact match for the search, or it's lower. It can't be + // greater. + if n.isLeaf() { + + // Firstly, if it's an exact match, we're done! + if bytes.Equal(n.leaf.key, key) { + found(n) + return + } + + // It's not so this node's leaf value must be lower and could still be a + // valid contender for reverse lower bound. + + // If it has no children then we are also done. + if len(n.edges) == 0 { + // This leaf is the lower bound. + found(n) + return + } + + // Finally, this leaf is internal (has children) so we'll keep searching, + // but we need to add it to the iterator's stack since it has a leaf value + // that needs to be iterated over. It needs to be added to the stack + // before its children below as it comes first. + ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) + // We also need to mark it as expanded since we'll be adding any of its + // relevant children below and so don't want the iterator to re-add them + // on its way back up the stack. + ri.expandedParents[n] = struct{}{} + } + + // Consume the search prefix. Note that this is safe because if n.prefix is + // longer than the search slice prefixCmp would have been > 0 above and the + // method would have already returned. + search = search[len(n.prefix):] + + if len(search) == 0 { + // We've exhausted the search key but we are not at a leaf. That means all + // children are greater than the search key so a reverse lower bound + // doesn't exist in this subtree. Note that there might still be one in + // the whole radix tree by following a different path somewhere further + // up. If that's the case then the iterator's stack will contain all the + // smaller nodes already and Previous will walk through them correctly. + return + } + + // Otherwise, take the lower bound next edge. + idx, lbNode := n.getLowerBoundEdge(search[0]) + + // From here, we need to update the stack with all values lower than + // the lower bound edge. 
Since getLowerBoundEdge() returns -1 when the
+		// search prefix is larger than all edges, we need to place idx at the
+		// last edge index so they can all be placed in the stack, since they
+		// come before our search prefix.
+		if idx == -1 {
+			idx = len(n.edges)
+		}
+
+		// Create stack edges for all strictly lower edges in this node.
+		if len(n.edges[:idx]) > 0 {
+			ri.i.stack = append(ri.i.stack, n.edges[:idx])
+		}
+
+		// Exit if there's no lower bound edge. The stack will have the previous
+		// nodes already.
+		if lbNode == nil {
+			return
+		}
+
+		// Recurse
+		n = lbNode
+	}
+}
+
+// Previous returns the previous node in reverse order
+func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) {
+	// Initialize our stack if needed
+	if ri.i.stack == nil && ri.i.node != nil {
+		ri.i.stack = []edges{
+			{
+				edge{node: ri.i.node},
+			},
+		}
+	}
+
+	if ri.expandedParents == nil {
+		ri.expandedParents = make(map[*Node]struct{})
+	}
+
+	for len(ri.i.stack) > 0 {
+		// Inspect the last element of the stack
+		n := len(ri.i.stack)
+		last := ri.i.stack[n-1]
+		m := len(last)
+		elem := last[m-1].node
+
+		_, alreadyExpanded := ri.expandedParents[elem]
+
+		// If this is an internal node and we've not seen it already, we need to
+		// leave it in the stack so we can return its possible leaf value _after_
+		// we've recursed through all its children.
+		if len(elem.edges) > 0 && !alreadyExpanded {
+			// record that we've seen this node!
+			ri.expandedParents[elem] = struct{}{}
+			// push child edges onto stack and skip the rest of the loop to recurse
+			// into the largest one.
+			ri.i.stack = append(ri.i.stack, elem.edges)
+			continue
+		}
+
+		// Remove the node from the stack
+		if m > 1 {
+			ri.i.stack[n-1] = last[:m-1]
+		} else {
+			ri.i.stack = ri.i.stack[:n-1]
+		}
+		// We don't need this state any more as it's no longer in the stack so we
+		// won't visit it again
+		if alreadyExpanded {
+			delete(ri.expandedParents, elem)
+		}
+
+		// If this is a leaf, return it
+		if elem.leaf != nil {
+			return elem.leaf.key, elem.leaf.val, true
+		}
+
+		// it's not a leaf so keep walking the stack to find the previous leaf
+	}
+	return nil, nil, false
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml
new file mode 100644
index 0000000000..80e1de44e9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml
@@ -0,0 +1,12 @@
+sudo: false
+
+language: go
+
+go:
+  - 1.6
+
+branches:
+  only:
+    - master
+
+script: make test
diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE
new file mode 100644
index 0000000000..e87a115e46
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5.
"Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. 
for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary
+      Licenses If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile
new file mode 100644
index 0000000000..c3989e789f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/Makefile
@@ -0,0 +1,8 @@
+TEST?=./...
+
+test:
+	go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4
+	go vet $(TEST)
+	go test $(TEST) -race
+
+.PHONY: test
diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md
new file mode 100644
index 0000000000..6a128e1e14
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/README.md
@@ -0,0 +1,44 @@
+# rootcerts
+
+Functions for loading root certificates for TLS connections.
+
+-----
+
+Go's standard library `crypto/tls` provides a common mechanism for configuring
+TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool
+of certificates for the client to use as a trust store when verifying server
+certificates.
+
+This library contains utility functions for loading certificates destined for
+that field, as well as one other important thing:
+
+When the `RootCAs` field is `nil`, the standard library attempts to load the
+host's root CA set. This behavior is OS-specific, and the Darwin
+implementation contains [a bug that prevents trusted certificates from the
+System and Login keychains from being loaded][1]. This library contains
+Darwin-specific behavior that works around that bug.
+
+[1]: https://github.com/golang/go/issues/14514
+
+## Example Usage
+
+Here's a snippet demonstrating how this library is meant to be used:
+
+```go
+func httpClient() (*http.Client, error) {
+	tlsConfig := &tls.Config{}
+	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
+		CAFile:        os.Getenv("MYAPP_CAFILE"),
+		CAPath:        os.Getenv("MYAPP_CAPATH"),
+		CACertificate: []byte(os.Getenv("MYAPP_CACERTIFICATE")),
+	})
+	if err != nil {
+		return nil, err
+	}
+	c := cleanhttp.DefaultClient()
+	t := cleanhttp.DefaultTransport()
+	t.TLSClientConfig = tlsConfig
+	c.Transport = t
+	return c, nil
+}
+```
diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go
new file mode 100644
index 0000000000..b55cc62848
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/doc.go
@@ -0,0 +1,9 @@
+// Package rootcerts contains functions to aid in loading CA certificates for
+// TLS connections.
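+//
+// A minimal sketch of the intended use (the MYAPP_CAFILE environment variable
+// is illustrative, not part of this package):
+//
+//	pool, err := rootcerts.LoadCACerts(&rootcerts.Config{
+//		CAFile: os.Getenv("MYAPP_CAFILE"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	tlsConfig := &tls.Config{RootCAs: pool}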
+//
+// In addition, its default behavior on Darwin works around an open issue [1]
+// in Go's crypto/x509 that prevents certificates from being loaded from the
+// System or Login keychains.
+//
+// [1] https://github.com/golang/go/issues/14514
+package rootcerts
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
new file mode 100644
index 0000000000..69aabd6bc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
@@ -0,0 +1,123 @@
+package rootcerts
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+// Config determines where LoadCACerts will load certificates from. When CAFile,
+// CACertificate and CAPath are blank, this library's functions will either load
+// system roots explicitly and return them, or set the CertPool to nil to allow
+// Go's standard library to load system certs.
+type Config struct {
+	// CAFile is a path to a PEM-encoded certificate file or bundle. Takes
+	// precedence over CACertificate and CAPath.
+	CAFile string
+
+	// CACertificate is a PEM-encoded certificate or bundle. Takes precedence
+	// over CAPath.
+	CACertificate []byte
+
+	// CAPath is a path to a directory populated with PEM-encoded certificates.
+	CAPath string
+}
+
+// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the
+// Config specified.
+func ConfigureTLS(t *tls.Config, c *Config) error {
+	if t == nil {
+		return nil
+	}
+	pool, err := LoadCACerts(c)
+	if err != nil {
+		return err
+	}
+	t.RootCAs = pool
+	return nil
+}
+
+// LoadCACerts loads a CertPool based on the Config specified.
+func LoadCACerts(c *Config) (*x509.CertPool, error) {
+	if c == nil {
+		c = &Config{}
+	}
+	if c.CAFile != "" {
+		return LoadCAFile(c.CAFile)
+	}
+	if len(c.CACertificate) != 0 {
+		return AppendCertificate(c.CACertificate)
+	}
+	if c.CAPath != "" {
+		return LoadCAPath(c.CAPath)
+	}
+
+	return LoadSystemCAs()
+}
+
+// LoadCAFile loads a single PEM-encoded file from the path specified.
+func LoadCAFile(caFile string) (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+
+	pem, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, fmt.Errorf("Error loading CA File: %s", err)
+	}
+
+	ok := pool.AppendCertsFromPEM(pem)
+	if !ok {
+		return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile)
+	}
+
+	return pool, nil
+}
+
+// AppendCertificate appends an in-memory PEM-encoded certificate or bundle and returns a pool.
+func AppendCertificate(ca []byte) (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+
+	ok := pool.AppendCertsFromPEM(ca)
+	if !ok {
+		return nil, errors.New("Error appending CA: Couldn't parse PEM")
+	}
+
+	return pool, nil
+}
+
+// LoadCAPath walks the provided path and loads all certificates encountered into
+// a pool.
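+// Directories are descended into recursively; every regular file that is
+// encountered must contain parseable PEM data, otherwise the walk is aborted
+// with an error.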
+func LoadCAPath(caPath string) (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+	walkFn := func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if info.IsDir() {
+			return nil
+		}
+
+		pem, err := ioutil.ReadFile(path)
+		if err != nil {
+			return fmt.Errorf("Error loading file from CAPath: %s", err)
+		}
+
+		ok := pool.AppendCertsFromPEM(pem)
+		if !ok {
+			return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path)
+		}
+
+		return nil
+	}
+
+	err := filepath.Walk(caPath, walkFn)
+	if err != nil {
+		return nil, err
+	}
+
+	return pool, nil
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
new file mode 100644
index 0000000000..66b1472c4a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
@@ -0,0 +1,12 @@
+// +build !darwin
+
+package rootcerts
+
+import "crypto/x509"
+
+// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that
+// default behavior of standard TLS config libraries is triggered, which is to
+// load system certs.
+func LoadSystemCAs() (*x509.CertPool, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
new file mode 100644
index 0000000000..a9a040657f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
@@ -0,0 +1,48 @@
+package rootcerts
+
+import (
+	"crypto/x509"
+	"os/exec"
+	"path"
+
+	"github.com/mitchellh/go-homedir"
+)
+
+// LoadSystemCAs has special behavior on Darwin systems to work around the
+// keychain certificate loading issue described in the package documentation:
+// it gathers certificates from the system and login keychains via the
+// `security` command-line tool.
+func LoadSystemCAs() (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+
+	for _, keychain := range certKeychains() {
+		err := addCertsFromKeychain(pool, keychain)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return pool, nil
+}
+
+func addCertsFromKeychain(pool *x509.CertPool, keychain string) error {
+	cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain)
+	data, err := cmd.Output()
+	if err != nil {
+		return err
+	}
+
+	pool.AppendCertsFromPEM(data)
+
+	return nil
+}
+
+func certKeychains() []string {
+	keychains := []string{
+		"/System/Library/Keychains/SystemRootCertificates.keychain",
+		"/Library/Keychains/System.keychain",
+	}
+	home, err := homedir.Dir()
+	if err == nil {
+		loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain")
+		keychains = append(keychains, loginKeychain)
+	}
+	return keychains
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE
new file mode 100644
index 0000000000..be2cc4dfb6
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4.
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 0000000000..a86c8539e0 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,177 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. 
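+// It returns the value and true on a hit, promoting the entry to most
+// recently used; on a miss it returns nil and false.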
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + if ent.Value.(*entry) == nil { + return nil, false + } + return ent.Value.(*entry).value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + var ent *list.Element + if ent, ok = c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// Resize changes the cache size. +func (c *LRU) Resize(size int) (evicted int) { + diff := c.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.removeOldest() + } + c.size = size + return diff +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 0000000000..92d70934d6 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,39 @@ +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Checks if a key exists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. 
+ Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. + Len() int + + // Clears all cache entries. + Purge() + + // Resizes cache, returning number evicted + Resize(int) int +} diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE new file mode 100644 index 0000000000..c33dcc7c92 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go new file mode 100644 index 0000000000..32124a73a2 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/client.go @@ -0,0 +1,243 @@ +package coordinate + +import ( + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/armon/go-metrics" +) + +// Client manages the estimated network coordinate for a given node, and adjusts +// it as the node observes round trip times and estimated coordinates from other +// nodes. The core algorithm is based on Vivaldi, see the documentation for Config +// for more details. +type Client struct { + // coord is the current estimate of the client's network coordinate. + coord *Coordinate + + // origin is a coordinate sitting at the origin. + origin *Coordinate + + // config contains the tuning parameters that govern the performance of + // the algorithm. + config *Config + + // adjustmentIndex is the current index into the adjustmentSamples slice. 
+	adjustmentIndex uint
+
+	// adjustment is used to store samples for the adjustment calculation.
+	adjustmentSamples []float64
+
+	// latencyFilterSamples is used to store the last several RTT samples,
+	// keyed by node name. We will use the config's LatencyFilterSize
+	// value to determine how many samples we keep, per node.
+	latencyFilterSamples map[string][]float64
+
+	// stats is used to record events that occur when updating coordinates.
+	stats ClientStats
+
+	// mutex enables safe concurrent access to the client.
+	mutex sync.RWMutex
+}
+
+// ClientStats is used to record events that occur when updating coordinates.
+type ClientStats struct {
+	// Resets is incremented any time we reset our local coordinate because
+	// our calculations have resulted in an invalid state.
+	Resets int
+}
+
+// NewClient creates a new Client and verifies the configuration is valid.
+func NewClient(config *Config) (*Client, error) {
+	if !(config.Dimensionality > 0) {
+		return nil, fmt.Errorf("dimensionality must be >0")
+	}
+
+	return &Client{
+		coord:                NewCoordinate(config),
+		origin:               NewCoordinate(config),
+		config:               config,
+		adjustmentIndex:      0,
+		adjustmentSamples:    make([]float64, config.AdjustmentWindowSize),
+		latencyFilterSamples: make(map[string][]float64),
+	}, nil
+}
+
+// GetCoordinate returns a copy of the coordinate for this client.
+func (c *Client) GetCoordinate() *Coordinate {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.coord.Clone()
+}
+
+// SetCoordinate forces the client's coordinate to a known state.
+func (c *Client) SetCoordinate(coord *Coordinate) error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	if err := c.checkCoordinate(coord); err != nil {
+		return err
+	}
+
+	c.coord = coord.Clone()
+	return nil
+}
+
+// ForgetNode removes any client state for the given node.
+func (c *Client) ForgetNode(node string) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	delete(c.latencyFilterSamples, node)
+}
+
+// Stats returns a copy of stats for the client.
+func (c *Client) Stats() ClientStats {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	return c.stats
+}
+
+// checkCoordinate returns an error if the coordinate isn't compatible with
+// this client, or if the coordinate itself isn't valid. This assumes the mutex
+// has been locked already.
+func (c *Client) checkCoordinate(coord *Coordinate) error {
+	if !c.coord.IsCompatibleWith(coord) {
+		return fmt.Errorf("dimensions aren't compatible")
+	}
+
+	if !coord.IsValid() {
+		return fmt.Errorf("coordinate is invalid")
+	}
+
+	return nil
+}
+
+// latencyFilter applies a simple moving median filter with a new sample for
+// a node. This assumes that the mutex has been locked already.
+func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
+	samples, ok := c.latencyFilterSamples[node]
+	if !ok {
+		samples = make([]float64, 0, c.config.LatencyFilterSize)
+	}
+
+	// Add the new sample and trim the list, if needed.
+	samples = append(samples, rttSeconds)
+	if len(samples) > int(c.config.LatencyFilterSize) {
+		samples = samples[1:]
+	}
+	c.latencyFilterSamples[node] = samples
+
+	// Sort a copy of the samples and return the median.
+	sorted := make([]float64, len(samples))
+	copy(sorted, samples)
+	sort.Float64s(sorted)
+	return sorted[len(sorted)/2]
+}
+
+// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
+// assumes that the mutex has been locked already.
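+//
+// In outline, writing ce for Config.VivaldiCE, cc for Config.VivaldiCC, and
+// e for the error estimates (this merely restates the code below; distances
+// are in seconds and are clamped away from zero):
+//
+//	weight    = e_local / (e_local + e_other)
+//	wrongness = |dist - rtt| / rtt
+//	e_local   = ce*weight*wrongness + e_local*(1 - ce*weight)
+//	force     = cc * weight * (rtt - dist)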
+func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) { + const zeroThreshold = 1.0e-6 + + dist := c.coord.DistanceTo(other).Seconds() + if rttSeconds < zeroThreshold { + rttSeconds = zeroThreshold + } + wrongness := math.Abs(dist-rttSeconds) / rttSeconds + + totalError := c.coord.Error + other.Error + if totalError < zeroThreshold { + totalError = zeroThreshold + } + weight := c.coord.Error / totalError + + c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight) + if c.coord.Error > c.config.VivaldiErrorMax { + c.coord.Error = c.config.VivaldiErrorMax + } + + delta := c.config.VivaldiCC * weight + force := delta * (rttSeconds - dist) + c.coord = c.coord.ApplyForce(c.config, force, other) +} + +// updateAdjustment updates the adjustment portion of the client's coordinate, if +// the feature is enabled. This assumes that the mutex has been locked already. +func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) { + if c.config.AdjustmentWindowSize == 0 { + return + } + + // Note that the existing adjustment factors don't figure in to this + // calculation so we use the raw distance here. + dist := c.coord.rawDistanceTo(other) + c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist + c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize + + sum := 0.0 + for _, sample := range c.adjustmentSamples { + sum += sample + } + c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize)) +} + +// updateGravity applies a small amount of gravity to pull coordinates towards +// the center of the coordinate system to combat drift. This assumes that the +// mutex is locked already. +func (c *Client) updateGravity() { + dist := c.origin.DistanceTo(c.coord).Seconds() + force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0) + c.coord = c.coord.ApplyForce(c.config, force, c.origin) +} + +// Update takes other, a coordinate for another node, and rtt, a round trip +// time observation for a ping to that node, and updates the estimated position of +// the client's coordinate. Returns the updated coordinate. +func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coordinate, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + + if err := c.checkCoordinate(other); err != nil { + return nil, err + } + + // The code down below can handle zero RTTs, which we have seen in + // https://github.com/hashicorp/consul/issues/3789, presumably in + // environments with coarse-grained monotonic clocks (we are still + // trying to pin this down). In any event, this is ok from a code PoV + // so we don't need to alert operators with spammy messages. We did + // add a counter so this is still observable, though. + const maxRTT = 10 * time.Second + if rtt < 0 || rtt > maxRTT { + return nil, fmt.Errorf("round trip time not in valid range, duration %v is not a positive value less than %v ", rtt, maxRTT) + } + if rtt == 0 { + metrics.IncrCounterWithLabels([]string{"serf", "coordinate", "zero-rtt"}, 1, c.config.MetricLabels) + } + + rttSeconds := c.latencyFilter(node, rtt.Seconds()) + c.updateVivaldi(other, rttSeconds) + c.updateAdjustment(other, rttSeconds) + c.updateGravity() + if !c.coord.IsValid() { + c.stats.Resets++ + c.coord = NewCoordinate(c.config) + } + + return c.coord.Clone(), nil +} + +// DistanceTo returns the estimated RTT from the client's coordinate to other, the +// coordinate for another node. 
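+//
+// The estimate combines the Euclidean distance with both nodes' height and
+// adjustment offsets (see Coordinate.DistanceTo for the exact formula).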
+func (c *Client) DistanceTo(other *Coordinate) time.Duration {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.coord.DistanceTo(other)
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go
new file mode 100644
index 0000000000..09c0cafe83
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/config.go
@@ -0,0 +1,77 @@
+package coordinate
+
+import (
+	"github.com/armon/go-metrics"
+)
+
+// Config is used to set the parameters of the Vivaldi-based coordinate mapping
+// algorithm.
+//
+// The following references are called out at various points in the documentation
+// here:
+//
+// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system."
+//     ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004.
+// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates
+//     in the Wild." NSDI. Vol. 7. 2007.
+// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for
+//     host-based network coordinate systems." Networking, IEEE/ACM Transactions
+//     on 18.1 (2010): 27-40.
+type Config struct {
+	// The dimensionality of the coordinate system. As discussed in [2], more
+	// dimensions improves the accuracy of the estimates up to a point. Per [2]
+	// we chose 8 dimensions plus a non-Euclidean height.
+	Dimensionality uint
+
+	// VivaldiErrorMax is the default error value when a node hasn't yet made
+	// any observations. It also serves as an upper limit on the error value in
+	// case observations cause the error value to increase without bound.
+	VivaldiErrorMax float64
+
+	// VivaldiCE is a tuning factor that controls the maximum impact an
+	// observation can have on a node's confidence. See [1] for more details.
+	VivaldiCE float64
+
+	// VivaldiCC is a tuning factor that controls the maximum impact an
+	// observation can have on a node's coordinate. See [1] for more details.
+	VivaldiCC float64
+
+	// AdjustmentWindowSize is a tuning factor that determines how many samples
+	// we retain to calculate the adjustment factor as discussed in [3]. Setting
+	// this to zero disables this feature.
+	AdjustmentWindowSize uint
+
+	// HeightMin is the minimum value of the height parameter. Since this
+	// always must be positive, it will introduce a small amount of error, so
+	// the chosen value should be relatively small compared to "normal"
+	// coordinates.
+	HeightMin float64
+
+	// LatencyFilterSize is the maximum number of samples that are retained
+	// per node, in order to compute a median. The intent is to ride out blips
+	// but still keep the delay low, since we probe any given node fairly
+	// infrequently. See [2] for more details.
+	LatencyFilterSize uint
+
+	// GravityRho is a tuning factor that controls how strong an effect gravity
+	// has when re-centering coordinates. See [2] for more details.
+	GravityRho float64
+
+	// MetricLabels is the slice of labels to put on all emitted metrics.
+	MetricLabels []metrics.Label
+}
+
+// DefaultConfig returns a Config that has some default values suitable for
+// basic testing of the algorithm, but not tuned to any particular type of cluster.
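+//
+// A hedged usage sketch (the caller-side variable names are hypothetical):
+//
+//	config := coordinate.DefaultConfig()
+//	config.AdjustmentWindowSize = 0 // e.g. disable the adjustment term from [3]
+//	client, err := coordinate.NewClient(config)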
+func DefaultConfig() *Config {
+	return &Config{
+		Dimensionality:       8,
+		VivaldiErrorMax:      1.5,
+		VivaldiCE:            0.25,
+		VivaldiCC:            0.25,
+		AdjustmentWindowSize: 20,
+		HeightMin:            10.0e-6,
+		LatencyFilterSize:    3,
+		GravityRho:           150.0,
+	}
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
new file mode 100644
index 0000000000..fbe792c90d
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
@@ -0,0 +1,203 @@
+package coordinate
+
+import (
+	"math"
+	"math/rand"
+	"time"
+)
+
+// Coordinate is a specialized structure for holding network coordinates for the
+// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
+// to enable this to be serialized. All values in here are in units of seconds.
+type Coordinate struct {
+	// Vec is the Euclidean portion of the coordinate. This is used along
+	// with the other fields to provide an overall distance estimate. The
+	// units here are seconds.
+	Vec []float64
+
+	// Error reflects the confidence in the given coordinate and is updated
+	// dynamically by the Vivaldi Client. This is dimensionless.
+	Error float64
+
+	// Adjustment is a distance offset computed based on a calculation over
+	// observations from all other nodes over a fixed window and is updated
+	// dynamically by the Vivaldi Client. The units here are seconds.
+	Adjustment float64
+
+	// Height is a distance offset that accounts for non-Euclidean effects
+	// which model the access links from nodes to the core Internet. The access
+	// links are usually set by bandwidth and congestion, and the core links
+	// usually follow distance based on geography.
+	Height float64
+}
+
+const (
+	// secondsToNanoseconds is used to convert float seconds to nanoseconds.
+	secondsToNanoseconds = 1.0e9
+
+	// zeroThreshold is used to decide if two coordinates are on top of each
+	// other.
+	zeroThreshold = 1.0e-6
+)
+
+// DimensionalityConflictError is used as a panic value when you try to
+// perform operations with incompatible dimensions.
+type DimensionalityConflictError struct{}
+
+// Error implements the error interface.
+func (e DimensionalityConflictError) Error() string {
+	return "coordinate dimensionality does not match"
+}
+
+// NewCoordinate creates a new coordinate at the origin, using the given config
+// to supply key initial values.
+func NewCoordinate(config *Config) *Coordinate {
+	return &Coordinate{
+		Vec:        make([]float64, config.Dimensionality),
+		Error:      config.VivaldiErrorMax,
+		Adjustment: 0.0,
+		Height:     config.HeightMin,
+	}
+}
+
+// Clone creates an independent copy of this coordinate.
+func (c *Coordinate) Clone() *Coordinate {
+	vec := make([]float64, len(c.Vec))
+	copy(vec, c.Vec)
+	return &Coordinate{
+		Vec:        vec,
+		Error:      c.Error,
+		Adjustment: c.Adjustment,
+		Height:     c.Height,
+	}
+}
+
+// componentIsValid returns false if a floating point value is a NaN or an
+// infinity.
+func componentIsValid(f float64) bool {
+	return !math.IsInf(f, 0) && !math.IsNaN(f)
+}
+
+// IsValid returns false if any component of a coordinate isn't valid, per the
+// componentIsValid() helper above.
+func (c *Coordinate) IsValid() bool {
+	for i := range c.Vec {
+		if !componentIsValid(c.Vec[i]) {
+			return false
+		}
+	}
+
+	return componentIsValid(c.Error) &&
+		componentIsValid(c.Adjustment) &&
+		componentIsValid(c.Height)
+}
+
+// IsCompatibleWith checks to see if the two coordinates are compatible
+// dimensionally.
If this returns true then you are guaranteed to not get +// any runtime errors operating on them. +func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool { + return len(c.Vec) == len(other.Vec) +} + +// ApplyForce returns the result of applying the force from the direction of the +// other coordinate. +func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate { + if !c.IsCompatibleWith(other) { + panic(DimensionalityConflictError{}) + } + + ret := c.Clone() + unit, mag := unitVectorAt(c.Vec, other.Vec) + ret.Vec = add(ret.Vec, mul(unit, force)) + if mag > zeroThreshold { + ret.Height = (ret.Height+other.Height)*force/mag + ret.Height + ret.Height = math.Max(ret.Height, config.HeightMin) + } + return ret +} + +// DistanceTo returns the distance between this coordinate and the other +// coordinate, including adjustments. +func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration { + if !c.IsCompatibleWith(other) { + panic(DimensionalityConflictError{}) + } + + dist := c.rawDistanceTo(other) + adjustedDist := dist + c.Adjustment + other.Adjustment + if adjustedDist > 0.0 { + dist = adjustedDist + } + return time.Duration(dist * secondsToNanoseconds) +} + +// rawDistanceTo returns the Vivaldi distance between this coordinate and the +// other coordinate in seconds, not including adjustments. This assumes the +// dimensions have already been checked to be compatible. +func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 { + return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height +} + +// add returns the sum of vec1 and vec2. This assumes the dimensions have +// already been checked to be compatible. +func add(vec1 []float64, vec2 []float64) []float64 { + ret := make([]float64, len(vec1)) + for i := range ret { + ret[i] = vec1[i] + vec2[i] + } + return ret +} + +// diff returns the difference between the vec1 and vec2. This assumes the +// dimensions have already been checked to be compatible. +func diff(vec1 []float64, vec2 []float64) []float64 { + ret := make([]float64, len(vec1)) + for i := range ret { + ret[i] = vec1[i] - vec2[i] + } + return ret +} + +// mul returns vec multiplied by a scalar factor. +func mul(vec []float64, factor float64) []float64 { + ret := make([]float64, len(vec)) + for i := range vec { + ret[i] = vec[i] * factor + } + return ret +} + +// magnitude computes the magnitude of the vec. +func magnitude(vec []float64) float64 { + sum := 0.0 + for i := range vec { + sum += vec[i] * vec[i] + } + return math.Sqrt(sum) +} + +// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two +// positions are the same then a random unit vector is returned. We also return +// the distance between the points for use in the later height calculation. +func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) { + ret := diff(vec1, vec2) + + // If the coordinates aren't on top of each other we can normalize. + if mag := magnitude(ret); mag > zeroThreshold { + return mul(ret, 1.0/mag), mag + } + + // Otherwise, just return a random unit vector. + for i := range ret { + ret[i] = rand.Float64() - 0.5 + } + if mag := magnitude(ret); mag > zeroThreshold { + return mul(ret, 1.0/mag), 0.0 + } + + // And finally just give up and make a unit vector along the first + // dimension. This should be exceedingly rare. 
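+	// (Reaching this fallback would require the random draw above to land
+	// within zeroThreshold of the origin in every dimension.)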
+	ret = make([]float64, len(ret))
+	ret[0] = 1.0
+	return ret, 0.0
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
new file mode 100644
index 0000000000..66da4e2e92
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
@@ -0,0 +1,187 @@
+package coordinate
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
+// GenerateClients returns a slice containing nodes clients, all created with
+// the given config.
+func GenerateClients(nodes int, config *Config) ([]*Client, error) {
+	clients := make([]*Client, nodes)
+	for i := range clients {
+		client, err := NewClient(config)
+		if err != nil {
+			return nil, err
+		}
+
+		clients[i] = client
+	}
+	return clients, nil
+}
+
+// GenerateLine returns a truth matrix as if all the nodes are in a straight line
+// with the given spacing between them.
+func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := time.Duration(j-i) * spacing
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
+// grid with the given spacing between them.
+func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	n := int(math.Sqrt(float64(nodes)))
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			x1, y1 := float64(i%n), float64(i/n)
+			x2, y2 := float64(j%n), float64(j/n)
+			dx, dy := x2-x1, y2-y1
+			dist := math.Sqrt(dx*dx + dy*dy)
+			rtt := time.Duration(dist * float64(spacing))
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateSplit returns a truth matrix as if half the nodes are close together in
+// one location and half the nodes are close together in another. The lan factor
+// is used to separate the nodes locally and the wan factor represents the split
+// between the two sides.
+func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	split := nodes / 2
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := lan
+			if (i <= split && j > split) || (i > split && j <= split) {
+				rtt += wan
+			}
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
+// around a circle with the given radius. The first node is at the "center" of the
+// circle because it's equidistant from all the other nodes, but we place it at
+// double the radius, so it should show up above all the other nodes in height.
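+//
+// For two perimeter nodes i and j the entries below are the chord length
+// between their positions on the unit circle, scaled by the radius:
+//
+//	t(k) = 2*pi*k/nodes
+//	rtt  = radius * sqrt((cos(t(j))-cos(t(i)))^2 + (sin(t(j))-sin(t(i)))^2)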
+func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration { + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + var rtt time.Duration + if i == 0 { + rtt = 2 * radius + } else { + t1 := 2.0 * math.Pi * float64(i) / float64(nodes) + x1, y1 := math.Cos(t1), math.Sin(t1) + t2 := 2.0 * math.Pi * float64(j) / float64(nodes) + x2, y2 := math.Cos(t2), math.Sin(t2) + dx, dy := x2-x1, y2-y1 + dist := math.Sqrt(dx*dx + dy*dy) + rtt = time.Duration(dist * float64(radius)) + } + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// GenerateRandom returns a truth matrix for a set of nodes with normally +// distributed delays, with the given mean and deviation. The RNG is re-seeded +// so you always get the same matrix for a given size. +func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration { + rand.Seed(1) + + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds() + rtt := time.Duration(rttSeconds * secondsToNanoseconds) + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// Simulate runs the given number of cycles using the given list of clients and +// truth matrix. On each cycle, each client will pick a random node and observe +// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for +// each simulation run to get deterministic results (for this algorithm and the +// underlying algorithm which will use random numbers for position vectors when +// starting out with everything at the origin). +func Simulate(clients []*Client, truth [][]time.Duration, cycles int) { + rand.Seed(1) + + nodes := len(clients) + for cycle := 0; cycle < cycles; cycle++ { + for i := range clients { + if j := rand.Intn(nodes); j != i { + c := clients[j].GetCoordinate() + rtt := truth[i][j] + node := fmt.Sprintf("node_%d", j) + clients[i].Update(node, c, rtt) + } + } + } +} + +// Stats is returned from the Evaluate function with a summary of the algorithm +// performance. +type Stats struct { + ErrorMax float64 + ErrorAvg float64 +} + +// Evaluate uses the coordinates of the given clients to calculate estimated +// distances and compares them with the given truth matrix, returning summary +// stats. 
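+//
+// Each pair contributes a relative error |estimated - actual| / actual;
+// ErrorMax is the worst pair and ErrorAvg is the mean over all pairs.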
+func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) { + nodes := len(clients) + count := 0 + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds() + actual := truth[i][j].Seconds() + error := math.Abs(est-actual) / actual + stats.ErrorMax = math.Max(stats.ErrorMax, error) + stats.ErrorAvg += error + count += 1 + } + } + + stats.ErrorAvg /= float64(count) + fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax) + return +} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 0000000000..f9c841a51e --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 0000000000..d70706d5b3 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 0000000000..25378537ea --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,167 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. 
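+// The result is cached after the first successful lookup unless DisableCache
+// is set; Reset clears the cache.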
+// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. +func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +// Reset clears the cache, forcing the next call to Dir to re-detect +// the home directory. This generally never has to be called, but can be +// useful in tests if you're modifying the home directory via the HOME +// env var or something. +func Reset() { + cacheLock.Lock() + defer cacheLock.Unlock() + homedirCache = "" +} + +func dirUnix() (string, error) { + homeEnv := "HOME" + if runtime.GOOS == "plan9" { + // On plan9, env vars are lowercase. + homeEnv = "home" + } + + // First prefer the HOME environmental variable + if home := os.Getenv(homeEnv); home != "" { + return home, nil + } + + var stdout bytes.Buffer + + // If that fails, try OS specific commands + if runtime.GOOS == "darwin" { + cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + result := strings.TrimSpace(stdout.String()) + if result != "" { + return result, nil + } + } + } else { + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
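+			// (For example, some minimal systems do not ship getent.)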
+ if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd := exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // Prefer standard environment variable USERPROFILE + if home := os.Getenv("USERPROFILE"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/go.etcd.io/etcd/api/v3/LICENSE b/vendor/go.etcd.io/etcd/api/v3/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go new file mode 100644 index 0000000000..16affcd62c --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go @@ -0,0 +1,1158 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: auth.proto + +package authpb + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Permission_Type int32 + +const ( + READ Permission_Type = 0 + WRITE Permission_Type = 1 + READWRITE Permission_Type = 2 +) + +var Permission_Type_name = map[int32]string{ + 0: "READ", + 1: "WRITE", + 2: "READWRITE", +} + +var Permission_Type_value = map[string]int32{ + "READ": 0, + "WRITE": 1, + "READWRITE": 2, +} + +func (x Permission_Type) String() string { + return proto.EnumName(Permission_Type_name, int32(x)) +} + +func (Permission_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{2, 0} +} + +type UserAddOptions struct { + NoPassword bool `protobuf:"varint,1,opt,name=no_password,json=noPassword,proto3" json:"no_password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserAddOptions) Reset() { *m = UserAddOptions{} } +func (m *UserAddOptions) String() string { return proto.CompactTextString(m) } +func (*UserAddOptions) ProtoMessage() {} +func (*UserAddOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{0} +} +func (m *UserAddOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserAddOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UserAddOptions.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UserAddOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserAddOptions.Merge(m, src) +} +func (m *UserAddOptions) XXX_Size() int { + return m.Size() +} +func (m *UserAddOptions) XXX_DiscardUnknown() { + xxx_messageInfo_UserAddOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_UserAddOptions proto.InternalMessageInfo + +// User is a single entry in the bucket authUsers +type User struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Roles []string `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"` + Options *UserAddOptions `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{1} +} +func (m *User) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_User.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *User) XXX_Merge(src proto.Message) { + xxx_messageInfo_User.Merge(m, src) +} +func (m *User) XXX_Size() int { + return m.Size() +} +func (m *User) XXX_DiscardUnknown() { + xxx_messageInfo_User.DiscardUnknown(m) +} + +var xxx_messageInfo_User proto.InternalMessageInfo + +// Permission is a single entity +type Permission struct { + PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` + Key []byte 
`protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Permission) Reset() { *m = Permission{} } +func (m *Permission) String() string { return proto.CompactTextString(m) } +func (*Permission) ProtoMessage() {} +func (*Permission) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{2} +} +func (m *Permission) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Permission.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Permission) XXX_Merge(src proto.Message) { + xxx_messageInfo_Permission.Merge(m, src) +} +func (m *Permission) XXX_Size() int { + return m.Size() +} +func (m *Permission) XXX_DiscardUnknown() { + xxx_messageInfo_Permission.DiscardUnknown(m) +} + +var xxx_messageInfo_Permission proto.InternalMessageInfo + +// Role is a single entry in the bucket authRoles +type Role struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Role) Reset() { *m = Role{} } +func (m *Role) String() string { return proto.CompactTextString(m) } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{3} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Role.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} + +var xxx_messageInfo_Role proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value) + proto.RegisterType((*UserAddOptions)(nil), "authpb.UserAddOptions") + proto.RegisterType((*User)(nil), "authpb.User") + proto.RegisterType((*Permission)(nil), "authpb.Permission") + proto.RegisterType((*Role)(nil), "authpb.Role") +} + +func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) } + +var fileDescriptor_8bbd6f3875b0e874 = []byte{ + // 338 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xea, 0x40, + 0x14, 0xc6, 0x3b, 0xb4, 0x70, 0xdb, 0xc3, 0x85, 0x90, 0x13, 0x72, 0x6f, 0x83, 0x49, 0x6d, 0xba, + 0x6a, 0x5c, 0x54, 0x85, 0x8d, 0x5b, 0x8c, 0x2c, 0x5c, 0x49, 0x26, 0x18, 0x97, 0xa4, 0xa4, 0x13, + 0x24, 0xc0, 0x4c, 0x33, 0x83, 0x31, 0x6c, 0x7c, 0x0e, 0x17, 0x3e, 0x10, 0x4b, 0x1e, 0x41, 0xf0, + 0x45, 0x4c, 0x67, 0xf8, 0x13, 0xa2, 0xbb, 0xef, 0x7c, 0xe7, 0xfb, 0x66, 0x7e, 0x99, 0x01, 0x48, + 0x5f, 0x16, 0xcf, 
0x49, 0x2e, 0xc5, 0x42, 0x60, 0xa5, 0xd0, 0xf9, 0xa8, 0xd5, 0x1c, 0x8b, 0xb1, + 0xd0, 0xd6, 0x65, 0xa1, 0xcc, 0x36, 0xba, 0x86, 0xfa, 0xa3, 0x62, 0xb2, 0x9b, 0x65, 0x0f, 0xf9, + 0x62, 0x22, 0xb8, 0xc2, 0x73, 0xa8, 0x72, 0x31, 0xcc, 0x53, 0xa5, 0x5e, 0x85, 0xcc, 0x7c, 0x12, + 0x92, 0xd8, 0xa5, 0xc0, 0x45, 0x7f, 0xe7, 0x44, 0x6f, 0xe0, 0x14, 0x15, 0x44, 0x70, 0x78, 0x3a, + 0x67, 0x3a, 0xf1, 0x97, 0x6a, 0x8d, 0x2d, 0x70, 0x0f, 0xcd, 0x92, 0xf6, 0x0f, 0x33, 0x36, 0xa1, + 0x2c, 0xc5, 0x8c, 0x29, 0xdf, 0x0e, 0xed, 0xd8, 0xa3, 0x66, 0xc0, 0x2b, 0xf8, 0x23, 0xcc, 0xcd, + 0xbe, 0x13, 0x92, 0xb8, 0xda, 0xfe, 0x97, 0x18, 0xe0, 0xe4, 0x94, 0x8b, 0xee, 0x63, 0xd1, 0x07, + 0x01, 0xe8, 0x33, 0x39, 0x9f, 0x28, 0x35, 0x11, 0x1c, 0x3b, 0xe0, 0xe6, 0x4c, 0xce, 0x07, 0xcb, + 0xdc, 0xa0, 0xd4, 0xdb, 0xff, 0xf7, 0x27, 0x1c, 0x53, 0x49, 0xb1, 0xa6, 0x87, 0x20, 0x36, 0xc0, + 0x9e, 0xb2, 0xe5, 0x0e, 0xb1, 0x90, 0x78, 0x06, 0x9e, 0x4c, 0xf9, 0x98, 0x0d, 0x19, 0xcf, 0x7c, + 0xdb, 0xa0, 0x6b, 0xa3, 0xc7, 0xb3, 0xe8, 0x02, 0x1c, 0x5d, 0x73, 0xc1, 0xa1, 0xbd, 0xee, 0x5d, + 0xc3, 0x42, 0x0f, 0xca, 0x4f, 0xf4, 0x7e, 0xd0, 0x6b, 0x10, 0xac, 0x81, 0x57, 0x98, 0x66, 0x2c, + 0x45, 0x03, 0x70, 0xa8, 0x98, 0xb1, 0x5f, 0x9f, 0xe7, 0x06, 0x6a, 0x53, 0xb6, 0x3c, 0x62, 0xf9, + 0xa5, 0xd0, 0x8e, 0xab, 0x6d, 0xfc, 0x09, 0x4c, 0x4f, 0x83, 0xb7, 0xfe, 0x6a, 0x13, 0x58, 0xeb, + 0x4d, 0x60, 0xad, 0xb6, 0x01, 0x59, 0x6f, 0x03, 0xf2, 0xb9, 0x0d, 0xc8, 0xfb, 0x57, 0x60, 0x8d, + 0x2a, 0xfa, 0x23, 0x3b, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x61, 0x66, 0xc6, 0x9d, 0xf4, 0x01, + 0x00, 0x00, +} + +func (m *UserAddOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserAddOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserAddOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.NoPassword { + i-- + if m.NoPassword { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *User) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *User) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAuth(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Roles) > 0 { + for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Roles[iNdEx]) + copy(dAtA[i:], m.Roles[iNdEx]) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Roles[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Password) > 0 { + i -= len(m.Password) + copy(dAtA[i:], m.Password) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Password))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintAuth(dAtA, 
i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Permission) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Permission) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Permission) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.RangeEnd) > 0 { + i -= len(m.RangeEnd) + copy(dAtA[i:], m.RangeEnd) + i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd))) + i-- + dAtA[i] = 0x1a + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + } + if m.PermType != 0 { + i = encodeVarintAuth(dAtA, i, uint64(m.PermType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Role) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Role) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.KeyPermission) > 0 { + for iNdEx := len(m.KeyPermission) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.KeyPermission[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAuth(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { + offset -= sovAuth(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *UserAddOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NoPassword { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *User) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovAuth(uint64(l)) + } + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovAuth(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Permission) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PermType != 0 { + n += 1 + sovAuth(uint64(m.PermType)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Role) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + 
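+ // Size arithmetic for a length-delimited field: one tag byte, a varint length prefix of sovAuth(l) bytes, then the payload itself. sovAuth(x) is the byte count of x as a base-128 varint, i.e. ceil(bits(x)/7) with a minimum of one byte.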
if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if len(m.KeyPermission) > 0 { + for _, e := range m.KeyPermission { + l = e.Size() + n += 1 + l + sovAuth(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovAuth(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAuth(x uint64) (n int) { + return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *UserAddOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserAddOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserAddOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoPassword", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoPassword = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *User) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: User: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
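+ // Appending into m.Name[:0] reuses the field's existing backing array when its capacity suffices; the nil check below normalizes an absent value into an empty, non-nil slice.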
+ if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...) + if m.Password == nil { + m.Password = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &UserAddOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
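+ // Tags this decoder does not know are not an error: their raw bytes are retained in XXX_unrecognized so that a decode/re-encode round trip preserves them.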
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Permission) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Permission: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType) + } + m.PermType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PermType |= Permission_Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
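+ // Note that PermType (case 1 above) is accumulated as a raw varint straight into Permission_Type; the generated code performs no enum range validation.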
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) + if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyPermission = append(m.KeyPermission, &Permission{}) + if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
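+ // skipAuth, defined below, returns the encoded length of one complete field, using a depth counter to walk nested (legacy) groups and rejecting truncated input and negative lengths.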
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAuth(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAuth + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAuth + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAuth + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAuth = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto new file mode 100644 index 0000000000..8f82b7cf1e --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; +package authpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +message UserAddOptions { + bool no_password = 1; +}; + +// User is a single entry in the bucket authUsers +message User { + bytes name = 1; + bytes password = 2; + repeated string roles = 3; + UserAddOptions options = 4; +} + +// Permission is a single entity +message Permission { + enum Type { + READ = 0; + WRITE = 1; + READWRITE = 2; + } + Type permType = 1; + + bytes key = 2; + bytes range_end = 3; +} + +// Role is a single entry in the bucket authRoles +message Role { + bytes name = 1; + + repeated Permission keyPermission = 2; +} diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go new file mode 100644 index 0000000000..38434d09c5 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go @@ -0,0 +1,1002 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: etcdserver.proto + +package etcdserverpb + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Request struct { + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` + Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` + Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` + Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` + PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` + PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` + PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` + Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` + Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` + Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` + Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` + Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` + Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` + Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` + Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` + Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_09ffbeb3bebbce7e, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(m, src) +} +func (m *Request) XXX_Size() int { + return m.Size() +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +type Metadata struct { + NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` + ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_09ffbeb3bebbce7e, []int{1} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) +} +func (m *Metadata) XXX_Size() int { + return m.Size() +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Request)(nil), "etcdserverpb.Request") + proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata") +} + +func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) } + +var fileDescriptor_09ffbeb3bebbce7e = []byte{ + // 380 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, + 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, + 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, + 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, + 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, + 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, + 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, + 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, + 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, + 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, + 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, + 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, + 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, + 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, + 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, + 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, + 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, + 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, + 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, + 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, + 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, + 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, + 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, + 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, +} + +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Refresh != nil { + i-- + 
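+ // The buffer is filled back to front: the value byte is written first (at the higher offset), then the key. Field numbers above 15 need a multi-byte key, so Refresh (field 17, wire type 0) gets the two-byte varint key 0x88 0x01, written here as two separate tag bytes.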
if *m.Refresh { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + i-- + if m.Stream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time)) + i-- + dAtA[i] = 0x78 + i-- + if m.Quorum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + i-- + if m.Sorted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + i-- + if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since)) + i-- + dAtA[i] = 0x58 + i-- + if m.Wait { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration)) + i-- + dAtA[i] = 0x48 + if m.PrevExist != nil { + i-- + if *m.PrevExist { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex)) + i-- + dAtA[i] = 0x38 + i -= len(m.PrevValue) + copy(dAtA[i:], m.PrevValue) + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue))) + i-- + dAtA[i] = 0x32 + i-- + if m.Dir { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + i -= len(m.Val) + copy(dAtA[i:], m.Val) + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val))) + i-- + dAtA[i] = 0x22 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + i -= len(m.Method) + copy(dAtA[i:], m.Method) + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method))) + i-- + dAtA[i] = 0x12 + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *Metadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID)) + i-- + dAtA[i] = 0x10 + i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int { + offset -= sovEtcdserver(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovEtcdserver(uint64(m.ID)) + l = len(m.Method) + n += 1 + l + sovEtcdserver(uint64(l)) + l = len(m.Path) + n += 1 + l + sovEtcdserver(uint64(l)) + l = len(m.Val) + n += 1 + l + sovEtcdserver(uint64(l)) + n += 2 + l = len(m.PrevValue) + n += 1 + l + sovEtcdserver(uint64(l)) + n += 1 + sovEtcdserver(uint64(m.PrevIndex)) + if m.PrevExist != nil { + n += 2 + } + n += 1 + sovEtcdserver(uint64(m.Expiration)) + n += 2 + n += 1 + sovEtcdserver(uint64(m.Since)) + n += 2 + n += 2 + n += 2 + n += 1 + sovEtcdserver(uint64(m.Time)) + n += 3 + if m.Refresh != nil { + n += 3 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Metadata) Size() (n int) { + if m == nil { + return 0 + } + var 
l int + _ = l + n += 1 + sovEtcdserver(uint64(m.NodeID)) + n += 1 + sovEtcdserver(uint64(m.ClusterID)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEtcdserver(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEtcdserver(x uint64) (n int) { + return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEtcdserver + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Method = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEtcdserver + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEtcdserver + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Val = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = 
%d for field Dir", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Dir = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEtcdserver + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrevValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType) + } + m.PrevIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PrevIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PrevExist = &b + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType) + } + m.Expiration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Expiration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Wait = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + m.Since = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Since |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Recursive = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Sorted = bool(v != 0) + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Quorum = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + m.Time = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Time |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stream = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Refresh = &b + default: + iNdEx = preIndex + skippy, err := skipEtcdserver(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEtcdserver + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) + } + m.ClusterID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEtcdserver(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEtcdserver + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEtcdserver(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEtcdserver + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEtcdserver + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEtcdserver + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEtcdserver = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto new file mode 100644 index 0000000000..25e0aca5d9 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto @@ -0,0 +1,34 @@ +syntax = "proto2"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message Request { + optional uint64 ID = 1 [(gogoproto.nullable) = false]; + optional string Method = 2 [(gogoproto.nullable) = false]; + optional string Path = 3 [(gogoproto.nullable) = false]; + optional string Val = 4 [(gogoproto.nullable) = false]; + optional bool Dir = 5 [(gogoproto.nullable) = false]; + optional string PrevValue = 6 [(gogoproto.nullable) = false]; + optional uint64 PrevIndex = 7 [(gogoproto.nullable) = false]; + optional bool PrevExist = 8 [(gogoproto.nullable) = true]; + optional int64 Expiration = 9 [(gogoproto.nullable) = false]; + optional bool Wait = 10 [(gogoproto.nullable) = false]; + optional uint64 Since = 11 [(gogoproto.nullable) = false]; + optional bool Recursive = 12 [(gogoproto.nullable) = false]; + optional bool Sorted = 13 [(gogoproto.nullable) = false]; + optional bool Quorum = 14 [(gogoproto.nullable) = false]; + optional int64 Time = 15 [(gogoproto.nullable) = false]; + optional bool Stream = 16 [(gogoproto.nullable) = false]; + optional bool Refresh = 17 [(gogoproto.nullable) = true]; +} + +message Metadata { + optional uint64 NodeID = 1 [(gogoproto.nullable) = false]; + optional uint64 ClusterID = 2 [(gogoproto.nullable) = false]; +} diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go 
b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go new file mode 100644 index 0000000000..b94a7bfd9d --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go @@ -0,0 +1,2673 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: raft_internal.proto + +package etcdserverpb + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" + membershippb "go.etcd.io/etcd/api/v3/membershippb" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type RequestHeader struct { + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // username is a username that is associated with an auth token of gRPC connection + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + // auth_revision is a revision number of auth.authStore. It is not related to mvcc + AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestHeader) Reset() { *m = RequestHeader{} } +func (m *RequestHeader) String() string { return proto.CompactTextString(m) } +func (*RequestHeader) ProtoMessage() {} +func (*RequestHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_b4c9a9be0cfca103, []int{0} +} +func (m *RequestHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestHeader.Merge(m, src) +} +func (m *RequestHeader) XXX_Size() int { + return m.Size() +} +func (m *RequestHeader) XXX_DiscardUnknown() { + xxx_messageInfo_RequestHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestHeader proto.InternalMessageInfo + +// An InternalRaftRequest is the union of all requests which can be +// sent via raft. 
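+// The union is realized as one optional sub-message field per request kind, with field numbers grouped by subsystem: KV and lease requests in the low range, auth management in the 1000s, user and role management in the 1100s and 1200s, and cluster membership in the 1300s.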
+type InternalRaftRequest struct { + Header *RequestHeader `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"` + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + V2 *Request `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"` + Range *RangeRequest `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"` + Put *PutRequest `protobuf:"bytes,4,opt,name=put,proto3" json:"put,omitempty"` + DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"` + Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"` + Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"` + LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"` + LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"` + Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"` + LeaseCheckpoint *LeaseCheckpointRequest `protobuf:"bytes,11,opt,name=lease_checkpoint,json=leaseCheckpoint,proto3" json:"lease_checkpoint,omitempty"` + AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"` + AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"` + AuthStatus *AuthStatusRequest `protobuf:"bytes,1013,opt,name=auth_status,json=authStatus,proto3" json:"auth_status,omitempty"` + Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"` + AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"` + AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"` + AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"` + AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"` + AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"` + AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"` + AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"` + AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"` + AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"` + AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" json:"auth_role_delete,omitempty"` + AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"` + AuthRoleGrantPermission *AuthRoleGrantPermissionRequest 
`protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"` + AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"` + ClusterVersionSet *membershippb.ClusterVersionSetRequest `protobuf:"bytes,1300,opt,name=cluster_version_set,json=clusterVersionSet,proto3" json:"cluster_version_set,omitempty"` + ClusterMemberAttrSet *membershippb.ClusterMemberAttrSetRequest `protobuf:"bytes,1301,opt,name=cluster_member_attr_set,json=clusterMemberAttrSet,proto3" json:"cluster_member_attr_set,omitempty"` + DowngradeInfoSet *membershippb.DowngradeInfoSetRequest `protobuf:"bytes,1302,opt,name=downgrade_info_set,json=downgradeInfoSet,proto3" json:"downgrade_info_set,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } +func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } +func (*InternalRaftRequest) ProtoMessage() {} +func (*InternalRaftRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b4c9a9be0cfca103, []int{1} +} +func (m *InternalRaftRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InternalRaftRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InternalRaftRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InternalRaftRequest.Merge(m, src) +} +func (m *InternalRaftRequest) XXX_Size() int { + return m.Size() +} +func (m *InternalRaftRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InternalRaftRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InternalRaftRequest proto.InternalMessageInfo + +type EmptyResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } +func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } +func (*EmptyResponse) ProtoMessage() {} +func (*EmptyResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b4c9a9be0cfca103, []int{2} +} +func (m *EmptyResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EmptyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EmptyResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EmptyResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EmptyResponse.Merge(m, src) +} +func (m *EmptyResponse) XXX_Size() int { + return m.Size() +} +func (m *EmptyResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EmptyResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_EmptyResponse proto.InternalMessageInfo + +// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? +// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. 
+// For avoiding misusage the field, we have an internal version of AuthenticateRequest. +type InternalAuthenticateRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + // simple_token is generated in API layer (etcdserver/v3_server.go) + SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} } +func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) } +func (*InternalAuthenticateRequest) ProtoMessage() {} +func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b4c9a9be0cfca103, []int{3} +} +func (m *InternalAuthenticateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InternalAuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InternalAuthenticateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InternalAuthenticateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InternalAuthenticateRequest.Merge(m, src) +} +func (m *InternalAuthenticateRequest) XXX_Size() int { + return m.Size() +} +func (m *InternalAuthenticateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InternalAuthenticateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InternalAuthenticateRequest proto.InternalMessageInfo + +func init() { + proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader") + proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest") + proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse") + proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest") +} + +func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) } + +var fileDescriptor_b4c9a9be0cfca103 = []byte{ + // 1003 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xd9, 0x72, 0x1b, 0x45, + 0x14, 0x86, 0x23, 0xc5, 0x71, 0xac, 0x96, 0xed, 0x38, 0x6d, 0x87, 0x34, 0x72, 0x95, 0x70, 0x1c, + 0x12, 0xcc, 0x66, 0x53, 0xca, 0x03, 0x80, 0x90, 0x5c, 0x8e, 0xab, 0x42, 0x70, 0x4d, 0xcc, 0x52, + 0xc5, 0xc5, 0xd0, 0x9a, 0x39, 0x96, 0x06, 0xcf, 0x46, 0x77, 0x4b, 0x31, 0xef, 0x11, 0x28, 0x1e, + 0x83, 0xed, 0x21, 0x72, 0xc1, 0x62, 0xe0, 0x05, 0xc0, 0xdc, 0x70, 0x0f, 0xdc, 0x53, 0xbd, 0xcc, + 0x26, 0xb5, 0x7c, 0xa7, 0xf9, 0xcf, 0x7f, 0xbe, 0x73, 0xba, 0xe7, 0xf4, 0xa8, 0xd1, 0x3a, 0xa3, + 0x27, 0xc2, 0x0d, 0x62, 0x01, 0x2c, 0xa6, 0xe1, 0x6e, 0xca, 0x12, 0x91, 0xe0, 0x65, 0x10, 0x9e, + 0xcf, 0x81, 0x4d, 0x80, 0xa5, 0x83, 0xd6, 0xc6, 0x30, 0x19, 0x26, 0x2a, 0xb0, 0x27, 0x7f, 0x69, + 0x4f, 0x6b, 0xad, 0xf0, 0x18, 0xa5, 0xc1, 0x52, 0xcf, 0xfc, 0xbc, 0x2f, 0x83, 0x7b, 0x34, 0x0d, + 0xf6, 0x22, 0x88, 0x06, 0xc0, 0xf8, 0x28, 0x48, 0xd3, 0x41, 0xe9, 0x41, 0xfb, 0xb6, 0x3f, 0x45, + 0x2b, 0x0e, 0x7c, 0x3e, 0x06, 0x2e, 0x1e, 0x02, 0xf5, 0x81, 0xe1, 0x55, 0x54, 0x3f, 0xec, 0x93, + 0xda, 0x56, 0x6d, 0x67, 0xc1, 0xa9, 0x1f, 0xf6, 0x71, 0x0b, 0x2d, 0x8d, 0xb9, 0x6c, 0x2d, 0x02, + 0x52, 0xdf, 0xaa, 
0xed, 0x34, 0x9c, 0xfc, 0x19, 0xdf, 0x45, 0x2b, 0x74, 0x2c, 0x46, 0x2e, 0x83, + 0x49, 0xc0, 0x83, 0x24, 0x26, 0x57, 0x55, 0xda, 0xb2, 0x14, 0x1d, 0xa3, 0x6d, 0x3f, 0xc3, 0x68, + 0xfd, 0xd0, 0xac, 0xce, 0xa1, 0x27, 0xc2, 0x94, 0xc3, 0x0f, 0xd0, 0xe2, 0x48, 0x95, 0x24, 0xfe, + 0x56, 0x6d, 0xa7, 0xd9, 0xd9, 0xdc, 0x2d, 0xaf, 0x79, 0xb7, 0xd2, 0x95, 0x63, 0xac, 0x33, 0xdd, + 0xdd, 0x43, 0xf5, 0x49, 0x47, 0xf5, 0xd5, 0xec, 0xdc, 0xb2, 0x02, 0x9c, 0xfa, 0xa4, 0x83, 0xdf, + 0x42, 0xd7, 0x18, 0x8d, 0x87, 0xa0, 0x1a, 0x6c, 0x76, 0x5a, 0x53, 0x4e, 0x19, 0xca, 0xec, 0xda, + 0x88, 0x5f, 0x43, 0x57, 0xd3, 0xb1, 0x20, 0x0b, 0xca, 0x4f, 0xaa, 0xfe, 0xa3, 0x71, 0xb6, 0x08, + 0x47, 0x9a, 0x70, 0x0f, 0x2d, 0xfb, 0x10, 0x82, 0x00, 0x57, 0x17, 0xb9, 0xa6, 0x92, 0xb6, 0xaa, + 0x49, 0x7d, 0xe5, 0xa8, 0x94, 0x6a, 0xfa, 0x85, 0x26, 0x0b, 0x8a, 0xb3, 0x98, 0x2c, 0xda, 0x0a, + 0x1e, 0x9f, 0xc5, 0x79, 0x41, 0x71, 0x16, 0xe3, 0xb7, 0x11, 0xf2, 0x92, 0x28, 0xa5, 0x9e, 0x90, + 0x9b, 0x7e, 0x5d, 0xa5, 0xbc, 0x54, 0x4d, 0xe9, 0xe5, 0xf1, 0x2c, 0xb3, 0x94, 0x82, 0xdf, 0x41, + 0xcd, 0x10, 0x28, 0x07, 0x77, 0xc8, 0x68, 0x2c, 0xc8, 0x92, 0x8d, 0xf0, 0x48, 0x1a, 0x0e, 0x64, + 0x3c, 0x27, 0x84, 0xb9, 0x24, 0xd7, 0xac, 0x09, 0x0c, 0x26, 0xc9, 0x29, 0x90, 0x86, 0x6d, 0xcd, + 0x0a, 0xe1, 0x28, 0x43, 0xbe, 0xe6, 0xb0, 0xd0, 0xe4, 0x6b, 0xa1, 0x21, 0x65, 0x11, 0x41, 0xb6, + 0xd7, 0xd2, 0x95, 0xa1, 0xfc, 0xb5, 0x28, 0x23, 0x7e, 0x1f, 0xad, 0xe9, 0xb2, 0xde, 0x08, 0xbc, + 0xd3, 0x34, 0x09, 0x62, 0x41, 0x9a, 0x2a, 0xf9, 0x65, 0x4b, 0xe9, 0x5e, 0x6e, 0xca, 0x30, 0x37, + 0xc2, 0xaa, 0x8e, 0xbb, 0xa8, 0xa9, 0x46, 0x18, 0x62, 0x3a, 0x08, 0x81, 0xfc, 0x6d, 0xdd, 0xcc, + 0xee, 0x58, 0x8c, 0xf6, 0x95, 0x21, 0xdf, 0x0a, 0x9a, 0x4b, 0xb8, 0x8f, 0xd4, 0xc0, 0xbb, 0x7e, + 0xc0, 0x15, 0xe3, 0x9f, 0xeb, 0xb6, 0xbd, 0x90, 0x8c, 0xbe, 0x76, 0xe4, 0x7b, 0x41, 0x0b, 0x2d, + 0x6f, 0x84, 0x0b, 0x2a, 0xc6, 0x9c, 0xfc, 0x37, 0xb7, 0x91, 0x27, 0xca, 0x50, 0x69, 0x44, 0x4b, + 0xf8, 0xb1, 0x6e, 0x04, 0x62, 0x11, 0x78, 0x54, 0x00, 0xf9, 0x57, 0x33, 0x5e, 0xad, 0x32, 0xb2, + 0xb3, 0xd8, 0x2d, 0x59, 0x33, 0x5a, 0x25, 0x1f, 0xef, 0x9b, 0xe3, 0x2d, 0xcf, 0xbb, 0x4b, 0x7d, + 0x9f, 0xfc, 0xb8, 0x34, 0x6f, 0x65, 0x1f, 0x70, 0x60, 0x5d, 0xdf, 0xaf, 0xac, 0xcc, 0x68, 0xf8, + 0x31, 0x5a, 0x2b, 0x30, 0x7a, 0xe4, 0xc9, 0x4f, 0x9a, 0x74, 0xd7, 0x4e, 0x32, 0x67, 0xc5, 0xc0, + 0x56, 0x69, 0x45, 0xae, 0xb6, 0x35, 0x04, 0x41, 0x7e, 0xbe, 0xb4, 0xad, 0x03, 0x10, 0x33, 0x6d, + 0x1d, 0x80, 0xc0, 0x43, 0xf4, 0x62, 0x81, 0xf1, 0x46, 0xf2, 0x10, 0xba, 0x29, 0xe5, 0xfc, 0x69, + 0xc2, 0x7c, 0xf2, 0x8b, 0x46, 0xbe, 0x6e, 0x47, 0xf6, 0x94, 0xfb, 0xc8, 0x98, 0x33, 0xfa, 0x0b, + 0xd4, 0x1a, 0xc6, 0x1f, 0xa3, 0x8d, 0x52, 0xbf, 0xf2, 0xf4, 0xb8, 0x2c, 0x09, 0x81, 0x9c, 0xeb, + 0x1a, 0xf7, 0xe7, 0xb4, 0xad, 0x4e, 0x5e, 0x52, 0x4c, 0xcb, 0x4d, 0x3a, 0x1d, 0xc1, 0x9f, 0xa0, + 0x5b, 0x05, 0x59, 0x1f, 0x44, 0x8d, 0xfe, 0x55, 0xa3, 0x5f, 0xb1, 0xa3, 0xcd, 0x89, 0x2c, 0xb1, + 0x31, 0x9d, 0x09, 0xe1, 0x87, 0x68, 0xb5, 0x80, 0x87, 0x01, 0x17, 0xe4, 0x37, 0x4d, 0xbd, 0x63, + 0xa7, 0x3e, 0x0a, 0xb8, 0xa8, 0xcc, 0x51, 0x26, 0xe6, 0x24, 0xd9, 0x9a, 0x26, 0xfd, 0x3e, 0x97, + 0x24, 0x4b, 0xcf, 0x90, 0x32, 0x31, 0x7f, 0xf5, 0x8a, 0x24, 0x27, 0xf2, 0x9b, 0xc6, 0xbc, 0x57, + 0x2f, 0x73, 0xa6, 0x27, 0xd2, 0x68, 0xf9, 0x44, 0x2a, 0x8c, 0x99, 0xc8, 0x6f, 0x1b, 0xf3, 0x26, + 0x52, 0x66, 0x59, 0x26, 0xb2, 0x90, 0xab, 0x6d, 0xc9, 0x89, 0xfc, 0xee, 0xd2, 0xb6, 0xa6, 0x27, + 0xd2, 0x68, 0xf8, 0x33, 0xd4, 0x2a, 0x61, 0xd4, 0xa0, 0xa4, 0xc0, 0xa2, 0x80, 0xab, 0xff, 0xd6, + 0xef, 0x35, 0xf3, 0x8d, 0x39, 0x4c, 0x69, 
0x3f, 0xca, 0xdd, 0x19, 0xff, 0x36, 0xb5, 0xc7, 0x71, + 0x84, 0x36, 0x8b, 0x5a, 0x66, 0x74, 0x4a, 0xc5, 0x7e, 0xd0, 0xc5, 0xde, 0xb4, 0x17, 0xd3, 0x53, + 0x32, 0x5b, 0x8d, 0xd0, 0x39, 0x06, 0xfc, 0x11, 0x5a, 0xf7, 0xc2, 0x31, 0x17, 0xc0, 0xdc, 0x09, + 0x30, 0x29, 0xb9, 0x1c, 0x04, 0x79, 0x86, 0xcc, 0x11, 0x28, 0x5f, 0x52, 0x76, 0x7b, 0xda, 0xf9, + 0xa1, 0x36, 0x3e, 0x29, 0x76, 0xeb, 0xa6, 0x37, 0x1d, 0xc1, 0x14, 0xdd, 0xce, 0xc0, 0x9a, 0xe1, + 0x52, 0x21, 0x98, 0x82, 0x7f, 0x89, 0xcc, 0xe7, 0xcf, 0x06, 0x7f, 0x4f, 0x69, 0x5d, 0x21, 0x58, + 0x89, 0xbf, 0xe1, 0x59, 0x82, 0xf8, 0x18, 0x61, 0x3f, 0x79, 0x1a, 0x0f, 0x19, 0xf5, 0xc1, 0x0d, + 0xe2, 0x93, 0x44, 0xd1, 0xbf, 0xd2, 0xf4, 0x7b, 0x55, 0x7a, 0x3f, 0x33, 0x1e, 0xc6, 0x27, 0x49, + 0x89, 0xbc, 0xe6, 0x4f, 0x05, 0xb6, 0x6f, 0xa0, 0x95, 0xfd, 0x28, 0x15, 0x5f, 0x38, 0xc0, 0xd3, + 0x24, 0xe6, 0xb0, 0x9d, 0xa2, 0xcd, 0x4b, 0x3e, 0xcd, 0x18, 0xa3, 0x05, 0x75, 0x07, 0xab, 0xa9, + 0x3b, 0x98, 0xfa, 0x2d, 0xef, 0x66, 0xf9, 0x17, 0xcb, 0xdc, 0xcd, 0xb2, 0x67, 0x7c, 0x07, 0x2d, + 0xf3, 0x20, 0x4a, 0x43, 0x70, 0x45, 0x72, 0x0a, 0xfa, 0x6a, 0xd6, 0x70, 0x9a, 0x5a, 0x3b, 0x96, + 0xd2, 0xbb, 0x1b, 0xcf, 0xff, 0x6c, 0x5f, 0x79, 0x7e, 0xd1, 0xae, 0x9d, 0x5f, 0xb4, 0x6b, 0x7f, + 0x5c, 0xb4, 0x6b, 0x5f, 0xff, 0xd5, 0xbe, 0x32, 0x58, 0x54, 0x17, 0xc3, 0x07, 0xff, 0x07, 0x00, + 0x00, 0xff, 0xff, 0x94, 0x6f, 0x64, 0x0a, 0x98, 0x0a, 0x00, 0x00, +} + +func (m *RequestHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.AuthRevision != 0 { + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision)) + i-- + dAtA[i] = 0x18 + } + if len(m.Username) > 0 { + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x12 + } + if m.ID != 0 { + i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InternalRaftRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.DowngradeInfoSet != nil { + { + size, err := m.DowngradeInfoSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x51 + i-- + dAtA[i] = 0xb2 + } + if m.ClusterMemberAttrSet != nil { + { + size, err := m.ClusterMemberAttrSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x51 + i-- + dAtA[i] = 0xaa + } + if m.ClusterVersionSet 
!= nil { + { + size, err := m.ClusterVersionSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x51 + i-- + dAtA[i] = 0xa2 + } + if m.AuthRoleRevokePermission != nil { + { + size, err := m.AuthRoleRevokePermission.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4b + i-- + dAtA[i] = 0xa2 + } + if m.AuthRoleGrantPermission != nil { + { + size, err := m.AuthRoleGrantPermission.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4b + i-- + dAtA[i] = 0x9a + } + if m.AuthRoleGet != nil { + { + size, err := m.AuthRoleGet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4b + i-- + dAtA[i] = 0x92 + } + if m.AuthRoleDelete != nil { + { + size, err := m.AuthRoleDelete.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4b + i-- + dAtA[i] = 0x8a + } + if m.AuthRoleAdd != nil { + { + size, err := m.AuthRoleAdd.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4b + i-- + dAtA[i] = 0x82 + } + if m.AuthRoleList != nil { + { + size, err := m.AuthRoleList.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x45 + i-- + dAtA[i] = 0x9a + } + if m.AuthUserList != nil { + { + size, err := m.AuthUserList.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x45 + i-- + dAtA[i] = 0x92 + } + if m.AuthUserRevokeRole != nil { + { + size, err := m.AuthUserRevokeRole.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x45 + i-- + dAtA[i] = 0x8a + } + if m.AuthUserGrantRole != nil { + { + size, err := m.AuthUserGrantRole.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x45 + i-- + dAtA[i] = 0x82 + } + if m.AuthUserChangePassword != nil { + { + size, err := m.AuthUserChangePassword.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x44 + i-- + dAtA[i] = 0xfa + } + if m.AuthUserGet != nil { + { + size, err := m.AuthUserGet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x44 + i-- + dAtA[i] = 0xf2 + } + if m.AuthUserDelete != nil { + { + size, err := m.AuthUserDelete.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x44 + i-- + dAtA[i] = 0xea + } + if m.AuthUserAdd != nil { + { + size, err := m.AuthUserAdd.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0x44 + i-- + dAtA[i] = 0xe2 + } + if m.AuthStatus != nil { + { + size, err := m.AuthStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3f + i-- + dAtA[i] = 0xaa + } + if m.Authenticate != nil { + { + size, err := m.Authenticate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3f + i-- + dAtA[i] = 0xa2 + } + if m.AuthDisable != nil { + { + size, err := m.AuthDisable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3f + i-- + dAtA[i] = 0x9a + } + if m.AuthEnable != nil { + { + size, err := m.AuthEnable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3e + i-- + dAtA[i] = 0xc2 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa2 + } + if m.LeaseCheckpoint != nil { + { + size, err := m.LeaseCheckpoint.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.Alarm != nil { + { + size, err := m.Alarm.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.LeaseRevoke != nil { + { + size, err := m.LeaseRevoke.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.LeaseGrant != nil { + { + size, err := m.LeaseGrant.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Compaction != nil { + { + size, err := m.Compaction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.Txn != nil { + { + size, err := m.Txn.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.DeleteRange != nil { + { + size, err := m.DeleteRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Put != nil { + { + size, err := m.Put.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Range != nil { + { + size, err := m.Range.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.V2 != nil { + { + size, err := m.V2.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ID != 0 { + i = encodeVarintRaftInternal(dAtA, i, 
uint64(m.ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EmptyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InternalAuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.SimpleToken) > 0 { + i -= len(m.SimpleToken) + copy(dAtA[i:], m.SimpleToken) + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken))) + i-- + dAtA[i] = 0x1a + } + if len(m.Password) > 0 { + i -= len(m.Password) + copy(dAtA[i:], m.Password) + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int { + offset -= sovRaftInternal(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RequestHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaftInternal(uint64(m.ID)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRevision != 0 { + n += 1 + sovRaftInternal(uint64(m.AuthRevision)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InternalRaftRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaftInternal(uint64(m.ID)) + } + if m.V2 != nil { + l = m.V2.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Range != nil { + l = m.Range.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Put != nil { + l = m.Put.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.DeleteRange != nil { + l = m.DeleteRange.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Txn != nil { + l = m.Txn.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Compaction != nil { + l = m.Compaction.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.LeaseGrant != nil { + l = m.LeaseGrant.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.LeaseRevoke != nil { + l = m.LeaseRevoke.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Alarm != nil { + l = m.Alarm.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.LeaseCheckpoint != nil { + l = m.LeaseCheckpoint.Size() + 
n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Header != nil { + l = m.Header.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthEnable != nil { + l = m.AuthEnable.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthDisable != nil { + l = m.AuthDisable.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.Authenticate != nil { + l = m.Authenticate.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthStatus != nil { + l = m.AuthStatus.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserAdd != nil { + l = m.AuthUserAdd.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserDelete != nil { + l = m.AuthUserDelete.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserGet != nil { + l = m.AuthUserGet.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserChangePassword != nil { + l = m.AuthUserChangePassword.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserGrantRole != nil { + l = m.AuthUserGrantRole.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserRevokeRole != nil { + l = m.AuthUserRevokeRole.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserList != nil { + l = m.AuthUserList.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleList != nil { + l = m.AuthRoleList.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleAdd != nil { + l = m.AuthRoleAdd.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleDelete != nil { + l = m.AuthRoleDelete.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleGet != nil { + l = m.AuthRoleGet.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleGrantPermission != nil { + l = m.AuthRoleGrantPermission.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleRevokePermission != nil { + l = m.AuthRoleRevokePermission.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.ClusterVersionSet != nil { + l = m.ClusterVersionSet.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.ClusterMemberAttrSet != nil { + l = m.ClusterMemberAttrSet.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.DowngradeInfoSet != nil { + l = m.DowngradeInfoSet.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EmptyResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InternalAuthenticateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + l = len(m.SimpleToken) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRaftInternal(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRaftInternal(x uint64) (n int) { + return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RequestHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift 
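+ // A byte with its high bit set (>= 0x80) continues the varint; each byte contributes its low seven bits, least-significant group first, so a byte below 0x80 terminates the value.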
+ if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType) + } + m.AuthRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AuthRevision |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
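+ // Unrecognized fields are retained verbatim in XXX_unrecognized so that data written by a newer schema survives a decode/re-encode round trip.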
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.V2 == nil { + m.V2 = &Request{} + } + if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Range == nil { + m.Range = &RangeRequest{} + } + if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Put == nil { + m.Put = &PutRequest{} + } + if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteRange == nil { + m.DeleteRange = &DeleteRangeRequest{} + } + if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Txn == nil { + m.Txn = &TxnRequest{} + } + if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Compaction == nil { + m.Compaction = &CompactionRequest{} + } + if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseGrant == nil { + m.LeaseGrant = &LeaseGrantRequest{} + } + if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseRevoke == nil { + m.LeaseRevoke = &LeaseRevokeRequest{} + } + if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err 
+ } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Alarm == nil { + m.Alarm = &AlarmRequest{} + } + if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseCheckpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseCheckpoint == nil { + m.LeaseCheckpoint = &LeaseCheckpointRequest{} + } + if err := m.LeaseCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1000: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthEnable == nil { + m.AuthEnable = &AuthEnableRequest{} + } + if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1011: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthDisable == nil { + m.AuthDisable = &AuthDisableRequest{} + } + if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1012: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Authenticate == nil { + m.Authenticate = &InternalAuthenticateRequest{} + } + if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1013: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthStatus == nil { + m.AuthStatus = &AuthStatusRequest{} + } + if err := m.AuthStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserAdd == nil { + m.AuthUserAdd = &AuthUserAddRequest{} + } + if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1101: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserDelete == nil { + m.AuthUserDelete = &AuthUserDeleteRequest{} + } + if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1102: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserGet == nil { + m.AuthUserGet = &AuthUserGetRequest{} + } + if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1103: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserChangePassword == nil { + m.AuthUserChangePassword = &AuthUserChangePasswordRequest{} + } + if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1104: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserGrantRole == nil { + m.AuthUserGrantRole = &AuthUserGrantRoleRequest{} + } + if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1105: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserRevokeRole == nil { + m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{} + } + if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1106: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserList == nil { + m.AuthUserList = &AuthUserListRequest{} + } + if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1107: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleList == nil { + m.AuthRoleList = &AuthRoleListRequest{} + } + if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1200: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleAdd == nil { + m.AuthRoleAdd = &AuthRoleAddRequest{} + } + if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1201: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleDelete == nil { + m.AuthRoleDelete = &AuthRoleDeleteRequest{} + } + if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1202: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleGet == nil { + m.AuthRoleGet = &AuthRoleGetRequest{} + } + if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1203: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleGrantPermission == nil { + m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{} + } + if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1204: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleRevokePermission == nil { + m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{} + } + if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1300: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterVersionSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterVersionSet == nil { + m.ClusterVersionSet = &membershippb.ClusterVersionSetRequest{} + } + if err := m.ClusterVersionSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1301: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterMemberAttrSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterMemberAttrSet == nil { + m.ClusterMemberAttrSet = &membershippb.ClusterMemberAttrSetRequest{} + } + if err := m.ClusterMemberAttrSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1302: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DowngradeInfoSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DowngradeInfoSet == nil { + m.DowngradeInfoSet = &membershippb.DowngradeInfoSetRequest{} + } + if err := m.DowngradeInfoSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmptyResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRaftInternal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SimpleToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaftInternal(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRaftInternal + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupRaftInternal + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthRaftInternal + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRaftInternal = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto new file mode 100644 index 0000000000..68926e59f6 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto @@ -0,0 +1,81 @@ +syntax = "proto3"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; +import "etcdserver.proto"; +import "rpc.proto"; +import "etcd/api/membershippb/membership.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message RequestHeader { + uint64 ID = 1; + // username is a username that is associated with an auth token of gRPC connection + string username = 2; + // auth_revision is a revision number of auth.authStore. It is not related to mvcc + uint64 auth_revision = 3; +} + +// An InternalRaftRequest is the union of all requests which can be +// sent via raft. 
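+// Field numbers are grouped by subsystem: KV, lease, and alarm requests occupy 1-11, the header uses 100, auth requests the 1000-1204 range, and membership requests the 1300 range.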
+message InternalRaftRequest { + RequestHeader header = 100; + uint64 ID = 1; + + Request v2 = 2; + + RangeRequest range = 3; + PutRequest put = 4; + DeleteRangeRequest delete_range = 5; + TxnRequest txn = 6; + CompactionRequest compaction = 7; + + LeaseGrantRequest lease_grant = 8; + LeaseRevokeRequest lease_revoke = 9; + + AlarmRequest alarm = 10; + + LeaseCheckpointRequest lease_checkpoint = 11; + + AuthEnableRequest auth_enable = 1000; + AuthDisableRequest auth_disable = 1011; + AuthStatusRequest auth_status = 1013; + + InternalAuthenticateRequest authenticate = 1012; + + AuthUserAddRequest auth_user_add = 1100; + AuthUserDeleteRequest auth_user_delete = 1101; + AuthUserGetRequest auth_user_get = 1102; + AuthUserChangePasswordRequest auth_user_change_password = 1103; + AuthUserGrantRoleRequest auth_user_grant_role = 1104; + AuthUserRevokeRoleRequest auth_user_revoke_role = 1105; + AuthUserListRequest auth_user_list = 1106; + AuthRoleListRequest auth_role_list = 1107; + + AuthRoleAddRequest auth_role_add = 1200; + AuthRoleDeleteRequest auth_role_delete = 1201; + AuthRoleGetRequest auth_role_get = 1202; + AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203; + AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204; + + membershippb.ClusterVersionSetRequest cluster_version_set = 1300; + membershippb.ClusterMemberAttrSetRequest cluster_member_attr_set = 1301; + membershippb.DowngradeInfoSetRequest downgrade_info_set = 1302; +} + +message EmptyResponse { +} + +// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? +// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. +// For avoiding misusage the field, we have an internal version of AuthenticateRequest. +message InternalAuthenticateRequest { + string name = 1; + string password = 2; + + // simple_token is generated in API layer (etcdserver/v3_server.go) + string simple_token = 3; +} diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go new file mode 100644 index 0000000000..31e121ee0a --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go @@ -0,0 +1,183 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserverpb + +import ( + "fmt" + "strings" + + proto "github.com/golang/protobuf/proto" +) + +// InternalRaftStringer implements custom proto Stringer: +// redact password, replace value fields with value_size fields. 
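+// For example, an authenticate entry is rendered with only its name and simple_token; the password field is never printed.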
+type InternalRaftStringer struct { + Request *InternalRaftRequest +} + +func (as *InternalRaftStringer) String() string { + switch { + case as.Request.LeaseGrant != nil: + return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>", + as.Request.Header.String(), + as.Request.LeaseGrant.TTL, + as.Request.LeaseGrant.ID, + ) + case as.Request.LeaseRevoke != nil: + return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>", + as.Request.Header.String(), + as.Request.LeaseRevoke.ID, + ) + case as.Request.Authenticate != nil: + return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>", + as.Request.Header.String(), + as.Request.Authenticate.Name, + as.Request.Authenticate.SimpleToken, + ) + case as.Request.AuthUserAdd != nil: + return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>", + as.Request.Header.String(), + as.Request.AuthUserAdd.Name, + ) + case as.Request.AuthUserChangePassword != nil: + return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>", + as.Request.Header.String(), + as.Request.AuthUserChangePassword.Name, + ) + case as.Request.Put != nil: + return fmt.Sprintf("header:<%s> put:<%s>", + as.Request.Header.String(), + NewLoggablePutRequest(as.Request.Put).String(), + ) + case as.Request.Txn != nil: + return fmt.Sprintf("header:<%s> txn:<%s>", + as.Request.Header.String(), + NewLoggableTxnRequest(as.Request.Txn).String(), + ) + default: + // nothing to redact + } + return as.Request.String() +} + +// txnRequestStringer implements a custom proto String to replace value bytes fields with value size +// fields in any nested txn and put operations. +type txnRequestStringer struct { + Request *TxnRequest +} + +func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer { + return &txnRequestStringer{request} +} + +func (as *txnRequestStringer) String() string { + var compare []string + for _, c := range as.Request.Compare { + switch cv := c.TargetUnion.(type) { + case *Compare_Value: + compare = append(compare, newLoggableValueCompare(c, cv).String()) + default: + // nothing to redact + compare = append(compare, c.String()) + } + } + var success []string + for _, s := range as.Request.Success { + success = append(success, newLoggableRequestOp(s).String()) + } + var failure []string + for _, f := range as.Request.Failure { + failure = append(failure, newLoggableRequestOp(f).String()) + } + return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>", + strings.Join(compare, " "), + strings.Join(success, " "), + strings.Join(failure, " "), + ) +} + +// requestOpStringer implements a custom proto String to replace value bytes fields with value +// size fields in any nested txn and put operations. +type requestOpStringer struct { + Op *RequestOp +} + +func newLoggableRequestOp(op *RequestOp) *requestOpStringer { + return &requestOpStringer{op} +} + +func (as *requestOpStringer) String() string { + switch op := as.Op.Request.(type) { + case *RequestOp_RequestPut: + return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String()) + case *RequestOp_RequestTxn: + return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String()) + default: + // nothing to redact + } + return as.Op.String() +} + +// loggableValueCompare implements a custom proto String for Compare.Value union member types to +// replace the value bytes field with a value size field. +// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
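+// The struct tags below deliberately reuse Compare's field numbers and names, so the text produced by proto.CompactTextString matches the real message apart from the substituted value_size.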
+type loggableValueCompare struct { + Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"` + Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"` + Key []byte `protobuf:"bytes,3,opt,name=key,proto3"` + ValueSize int64 `protobuf:"varint,7,opt,name=value_size,proto3"` + RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,proto3"` +} + +func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare { + return &loggableValueCompare{ + c.Result, + c.Target, + c.Key, + int64(len(cv.Value)), + c.RangeEnd, + } +} + +func (m *loggableValueCompare) Reset() { *m = loggableValueCompare{} } +func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) } +func (*loggableValueCompare) ProtoMessage() {} + +// loggablePutRequest implements a custom proto String to replace value bytes field with a value +// size field. +// To preserve proto encoding of the key bytes, a faked out proto type is used here. +type loggablePutRequest struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3"` + ValueSize int64 `protobuf:"varint,2,opt,name=value_size,proto3"` + Lease int64 `protobuf:"varint,3,opt,name=lease,proto3"` + PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,proto3"` + IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,proto3"` + IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,proto3"` +} + +func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest { + return &loggablePutRequest{ + request.Key, + int64(len(request.Value)), + request.Lease, + request.PrevKv, + request.IgnoreValue, + request.IgnoreLease, + } +} + +func (m *loggablePutRequest) Reset() { *m = loggablePutRequest{} } +func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) } +func (*loggablePutRequest) ProtoMessage() {} diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go new file mode 100644 index 0000000000..8120907d91 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go @@ -0,0 +1,25862 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: rpc.proto + +package etcdserverpb + +import ( + context "context" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" + authpb "go.etcd.io/etcd/api/v3/authpb" + mvccpb "go.etcd.io/etcd/api/v3/mvccpb" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type AlarmType int32 + +const ( + AlarmType_NONE AlarmType = 0 + AlarmType_NOSPACE AlarmType = 1 + AlarmType_CORRUPT AlarmType = 2 +) + +var AlarmType_name = map[int32]string{ + 0: "NONE", + 1: "NOSPACE", + 2: "CORRUPT", +} + +var AlarmType_value = map[string]int32{ + "NONE": 0, + "NOSPACE": 1, + "CORRUPT": 2, +} + +func (x AlarmType) String() string { + return proto.EnumName(AlarmType_name, int32(x)) +} + +func (AlarmType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{0} +} + +type RangeRequest_SortOrder int32 + +const ( + RangeRequest_NONE RangeRequest_SortOrder = 0 + RangeRequest_ASCEND RangeRequest_SortOrder = 1 + RangeRequest_DESCEND RangeRequest_SortOrder = 2 +) + +var RangeRequest_SortOrder_name = map[int32]string{ + 0: "NONE", + 1: "ASCEND", + 2: "DESCEND", +} + +var RangeRequest_SortOrder_value = map[string]int32{ + "NONE": 0, + "ASCEND": 1, + "DESCEND": 2, +} + +func (x RangeRequest_SortOrder) String() string { + return proto.EnumName(RangeRequest_SortOrder_name, int32(x)) +} + +func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{1, 0} +} + +type RangeRequest_SortTarget int32 + +const ( + RangeRequest_KEY RangeRequest_SortTarget = 0 + RangeRequest_VERSION RangeRequest_SortTarget = 1 + RangeRequest_CREATE RangeRequest_SortTarget = 2 + RangeRequest_MOD RangeRequest_SortTarget = 3 + RangeRequest_VALUE RangeRequest_SortTarget = 4 +) + +var RangeRequest_SortTarget_name = map[int32]string{ + 0: "KEY", + 1: "VERSION", + 2: "CREATE", + 3: "MOD", + 4: "VALUE", +} + +var RangeRequest_SortTarget_value = map[string]int32{ + "KEY": 0, + "VERSION": 1, + "CREATE": 2, + "MOD": 3, + "VALUE": 4, +} + +func (x RangeRequest_SortTarget) String() string { + return proto.EnumName(RangeRequest_SortTarget_name, int32(x)) +} + +func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{1, 1} +} + +type Compare_CompareResult int32 + +const ( + Compare_EQUAL Compare_CompareResult = 0 + Compare_GREATER Compare_CompareResult = 1 + Compare_LESS Compare_CompareResult = 2 + Compare_NOT_EQUAL Compare_CompareResult = 3 +) + +var Compare_CompareResult_name = map[int32]string{ + 0: "EQUAL", + 1: "GREATER", + 2: "LESS", + 3: "NOT_EQUAL", +} + +var Compare_CompareResult_value = map[string]int32{ + "EQUAL": 0, + "GREATER": 1, + "LESS": 2, + "NOT_EQUAL": 3, +} + +func (x Compare_CompareResult) String() string { + return proto.EnumName(Compare_CompareResult_name, int32(x)) +} + +func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{9, 0} +} + +type Compare_CompareTarget int32 + +const ( + Compare_VERSION Compare_CompareTarget = 0 + Compare_CREATE Compare_CompareTarget = 1 + Compare_MOD Compare_CompareTarget = 2 + Compare_VALUE Compare_CompareTarget = 3 + Compare_LEASE Compare_CompareTarget = 4 +) + +var Compare_CompareTarget_name = map[int32]string{ + 0: "VERSION", + 1: "CREATE", + 2: "MOD", + 3: "VALUE", + 4: "LEASE", +} + +var Compare_CompareTarget_value = map[string]int32{ + "VERSION": 0, + "CREATE": 1, + "MOD": 2, + "VALUE": 3, + "LEASE": 4, +} + +func (x Compare_CompareTarget) String() string { + return proto.EnumName(Compare_CompareTarget_name, int32(x)) +} + +func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{9, 1} +} + +type WatchCreateRequest_FilterType int32 + +const 
(
+	// filter out put event.
+	WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0
+	// filter out delete event.
+	WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1
+)
+
+var WatchCreateRequest_FilterType_name = map[int32]string{
+	0: "NOPUT",
+	1: "NODELETE",
+}
+
+var WatchCreateRequest_FilterType_value = map[string]int32{
+	"NOPUT":    0,
+	"NODELETE": 1,
+}
+
+func (x WatchCreateRequest_FilterType) String() string {
+	return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x))
+}
+
+func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{21, 0}
+}
+
+type AlarmRequest_AlarmAction int32
+
+const (
+	AlarmRequest_GET        AlarmRequest_AlarmAction = 0
+	AlarmRequest_ACTIVATE   AlarmRequest_AlarmAction = 1
+	AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2
+)
+
+var AlarmRequest_AlarmAction_name = map[int32]string{
+	0: "GET",
+	1: "ACTIVATE",
+	2: "DEACTIVATE",
+}
+
+var AlarmRequest_AlarmAction_value = map[string]int32{
+	"GET":        0,
+	"ACTIVATE":   1,
+	"DEACTIVATE": 2,
+}
+
+func (x AlarmRequest_AlarmAction) String() string {
+	return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x))
+}
+
+func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{54, 0}
+}
+
+type DowngradeRequest_DowngradeAction int32
+
+const (
+	DowngradeRequest_VALIDATE DowngradeRequest_DowngradeAction = 0
+	DowngradeRequest_ENABLE   DowngradeRequest_DowngradeAction = 1
+	DowngradeRequest_CANCEL   DowngradeRequest_DowngradeAction = 2
+)
+
+var DowngradeRequest_DowngradeAction_name = map[int32]string{
+	0: "VALIDATE",
+	1: "ENABLE",
+	2: "CANCEL",
+}
+
+var DowngradeRequest_DowngradeAction_value = map[string]int32{
+	"VALIDATE": 0,
+	"ENABLE":   1,
+	"CANCEL":   2,
+}
+
+func (x DowngradeRequest_DowngradeAction) String() string {
+	return proto.EnumName(DowngradeRequest_DowngradeAction_name, int32(x))
+}
+
+func (DowngradeRequest_DowngradeAction) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{57, 0}
+}
+
+type ResponseHeader struct {
+	// cluster_id is the ID of the cluster which sent the response.
+	ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+	// member_id is the ID of the member which sent the response.
+	MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
+	// revision is the key-value store revision when the request was applied.
+	// For watch progress responses, the header.revision indicates progress. All future events
+	// received in this stream are guaranteed to have a higher revision number than the
+	// header.revision number.
+	Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
+	// raft_term is the raft term when the request was applied.
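+	//
+	// Illustrative note, not vendored code: the revision guarantee above is
+	// what makes watch resumption safe; a client that has observed progress up
+	// to header.revision can reconnect watching from header.revision+1 without
+	// missing or replaying events, e.g. with clientv3:
+	//
+	//	wch := cli.Watch(ctx, "k", clientv3.WithRev(lastSeen+1))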
+ RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } +func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } +func (*ResponseHeader) ProtoMessage() {} +func (*ResponseHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{0} +} +func (m *ResponseHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseHeader.Merge(m, src) +} +func (m *ResponseHeader) XXX_Size() int { + return m.Size() +} +func (m *ResponseHeader) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo + +func (m *ResponseHeader) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +func (m *ResponseHeader) GetMemberId() uint64 { + if m != nil { + return m.MemberId + } + return 0 +} + +func (m *ResponseHeader) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *ResponseHeader) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + +type RangeRequest struct { + // key is the first key for the range. If range_end is not given, the request only looks up key. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the upper bound on the requested range [key, range_end). + // If range_end is '\0', the range is all keys >= key. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. + RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. + Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + // revision is the point-in-time of the key-value store to use for the range. + // If revision is less or equal to zero, the range is over the newest key-value store. + // If the revision has been compacted, ErrCompacted is returned as a response. + Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"` + // sort_order is the order for returned sorted results. + SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"` + // sort_target is the key-value field to use for sorting. + SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"` + // serializable sets the range request to use serializable member-local reads. 
+ // Range requests are linearizable by default; linearizable requests have higher + // latency and lower throughput than serializable requests but reflect the current + // consensus of the cluster. For better performance, in exchange for possible stale reads, + // a serializable range request is served locally without needing to reach consensus + // with other nodes in the cluster. + Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"` + // keys_only when set returns only the keys and not the values. + KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"` + // count_only when set returns only the count of the keys in the range. + CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"` + // min_mod_revision is the lower bound for returned key mod revisions; all keys with + // lesser mod revisions will be filtered away. + MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"` + // max_mod_revision is the upper bound for returned key mod revisions; all keys with + // greater mod revisions will be filtered away. + MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"` + // min_create_revision is the lower bound for returned key create revisions; all keys with + // lesser create revisions will be filtered away. + MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"` + // max_create_revision is the upper bound for returned key create revisions; all keys with + // greater create revisions will be filtered away. 
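+	//
+	// Illustrative sketch, not vendored code: a paginated, newest-first listing
+	// under a prefix combines several of the fields above:
+	//
+	//	req := &RangeRequest{
+	//		Key:        []byte("job/"),
+	//		RangeEnd:   []byte("job0"), // "job/" plus one => prefix scan
+	//		Limit:      100,
+	//		SortOrder:  RangeRequest_DESCEND,
+	//		SortTarget: RangeRequest_MOD,
+	//	}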
+ MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RangeRequest) Reset() { *m = RangeRequest{} } +func (m *RangeRequest) String() string { return proto.CompactTextString(m) } +func (*RangeRequest) ProtoMessage() {} +func (*RangeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{1} +} +func (m *RangeRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RangeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RangeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RangeRequest.Merge(m, src) +} +func (m *RangeRequest) XXX_Size() int { + return m.Size() +} +func (m *RangeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RangeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RangeRequest proto.InternalMessageInfo + +func (m *RangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *RangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *RangeRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *RangeRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { + if m != nil { + return m.SortOrder + } + return RangeRequest_NONE +} + +func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { + if m != nil { + return m.SortTarget + } + return RangeRequest_KEY +} + +func (m *RangeRequest) GetSerializable() bool { + if m != nil { + return m.Serializable + } + return false +} + +func (m *RangeRequest) GetKeysOnly() bool { + if m != nil { + return m.KeysOnly + } + return false +} + +func (m *RangeRequest) GetCountOnly() bool { + if m != nil { + return m.CountOnly + } + return false +} + +func (m *RangeRequest) GetMinModRevision() int64 { + if m != nil { + return m.MinModRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxModRevision() int64 { + if m != nil { + return m.MaxModRevision + } + return 0 +} + +func (m *RangeRequest) GetMinCreateRevision() int64 { + if m != nil { + return m.MinCreateRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxCreateRevision() int64 { + if m != nil { + return m.MaxCreateRevision + } + return 0 +} + +type RangeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // kvs is the list of key-value pairs matched by the range request. + // kvs is empty when count is requested. + Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs,proto3" json:"kvs,omitempty"` + // more indicates if there are more keys to return in the requested range. + More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` + // count is set to the number of keys within the range when requested. 
+ Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RangeResponse) Reset() { *m = RangeResponse{} } +func (m *RangeResponse) String() string { return proto.CompactTextString(m) } +func (*RangeResponse) ProtoMessage() {} +func (*RangeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{2} +} +func (m *RangeResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RangeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RangeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RangeResponse.Merge(m, src) +} +func (m *RangeResponse) XXX_Size() int { + return m.Size() +} +func (m *RangeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RangeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RangeResponse proto.InternalMessageInfo + +func (m *RangeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { + if m != nil { + return m.Kvs + } + return nil +} + +func (m *RangeResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +func (m *RangeResponse) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +type PutRequest struct { + // key is the key, in bytes, to put into the key-value store. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // value is the value, in bytes, to associate with the key in the key-value store. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // lease is the lease ID to associate with the key in the key-value store. A lease + // value of 0 indicates no lease. + Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"` + // If prev_kv is set, etcd gets the previous key-value pair before changing it. + // The previous key-value pair will be returned in the put response. + PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. 
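+	//
+	// Illustrative sketch, not vendored code: refreshing a value while keeping
+	// whatever lease is already attached to the key:
+	//
+	//	req := &PutRequest{
+	//		Key:         []byte("cfg"),
+	//		Value:       []byte("v2"),
+	//		IgnoreLease: true, // keep the key's current lease
+	//		PrevKv:      true, // return the overwritten pair
+	//	}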
+ IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} +func (*PutRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{3} +} +func (m *PutRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PutRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutRequest.Merge(m, src) +} +func (m *PutRequest) XXX_Size() int { + return m.Size() +} +func (m *PutRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PutRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PutRequest proto.InternalMessageInfo + +func (m *PutRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *PutRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *PutRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +func (m *PutRequest) GetIgnoreValue() bool { + if m != nil { + return m.IgnoreValue + } + return false +} + +func (m *PutRequest) GetIgnoreLease() bool { + if m != nil { + return m.IgnoreLease + } + return false +} + +type PutResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // if prev_kv is set in the request, the previous key-value pair will be returned. + PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} +func (*PutResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{4} +} +func (m *PutResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PutResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutResponse.Merge(m, src) +} +func (m *PutResponse) XXX_Size() int { + return m.Size() +} +func (m *PutResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PutResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PutResponse proto.InternalMessageInfo + +func (m *PutResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue { + if m != nil { + return m.PrevKv + } + return nil +} + +type DeleteRangeRequest struct { + // key is the first key to delete in the range. 
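+	//
+	// Illustrative sketch, not vendored code: the "prefix" form of range_end
+	// described below is the key with its last byte incremented; this ignores
+	// the 0xff carry case a production helper must handle:
+	//
+	//	func prefixEnd(key []byte) []byte {
+	//		end := append([]byte{}, key...)
+	//		end[len(end)-1]++ // e.g. "job/" -> "job0"
+	//		return end
+	//	}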
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the key following the last key to delete for the range [key, range_end). + // If range_end is not given, the range is defined to contain only the key argument. + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). + // If range_end is '\0', the range is all keys greater than or equal to the key argument. + RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. + // The previous key-value pairs will be returned in the delete response. + PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } +func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeRequest) ProtoMessage() {} +func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{5} +} +func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteRangeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRangeRequest.Merge(m, src) +} +func (m *DeleteRangeRequest) XXX_Size() int { + return m.Size() +} +func (m *DeleteRangeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo + +func (m *DeleteRangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *DeleteRangeRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +type DeleteRangeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // deleted is the number of keys deleted by the delete range request. + Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` + // if prev_kv is set in the request, the previous key-value pairs will be returned. 
+ PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs,proto3" json:"prev_kvs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } +func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeResponse) ProtoMessage() {} +func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{6} +} +func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteRangeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRangeResponse.Merge(m, src) +} +func (m *DeleteRangeResponse) XXX_Size() int { + return m.Size() +} +func (m *DeleteRangeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo + +func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DeleteRangeResponse) GetDeleted() int64 { + if m != nil { + return m.Deleted + } + return 0 +} + +func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { + if m != nil { + return m.PrevKvs + } + return nil +} + +type RequestOp struct { + // request is a union of request types accepted by a transaction. + // + // Types that are valid to be assigned to Request: + // *RequestOp_RequestRange + // *RequestOp_RequestPut + // *RequestOp_RequestDeleteRange + // *RequestOp_RequestTxn + Request isRequestOp_Request `protobuf_oneof:"request"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestOp) Reset() { *m = RequestOp{} } +func (m *RequestOp) String() string { return proto.CompactTextString(m) } +func (*RequestOp) ProtoMessage() {} +func (*RequestOp) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{7} +} +func (m *RequestOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestOp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestOp.Merge(m, src) +} +func (m *RequestOp) XXX_Size() int { + return m.Size() +} +func (m *RequestOp) XXX_DiscardUnknown() { + xxx_messageInfo_RequestOp.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestOp proto.InternalMessageInfo + +type isRequestOp_Request interface { + isRequestOp_Request() + MarshalTo([]byte) (int, error) + Size() int +} + +type RequestOp_RequestRange struct { + RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,proto3,oneof" json:"request_range,omitempty"` +} +type RequestOp_RequestPut struct { + RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,proto3,oneof" json:"request_put,omitempty"` +} +type 
RequestOp_RequestDeleteRange struct { + RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,proto3,oneof" json:"request_delete_range,omitempty"` +} +type RequestOp_RequestTxn struct { + RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,proto3,oneof" json:"request_txn,omitempty"` +} + +func (*RequestOp_RequestRange) isRequestOp_Request() {} +func (*RequestOp_RequestPut) isRequestOp_Request() {} +func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {} +func (*RequestOp_RequestTxn) isRequestOp_Request() {} + +func (m *RequestOp) GetRequest() isRequestOp_Request { + if m != nil { + return m.Request + } + return nil +} + +func (m *RequestOp) GetRequestRange() *RangeRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok { + return x.RequestRange + } + return nil +} + +func (m *RequestOp) GetRequestPut() *PutRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok { + return x.RequestPut + } + return nil +} + +func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok { + return x.RequestDeleteRange + } + return nil +} + +func (m *RequestOp) GetRequestTxn() *TxnRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok { + return x.RequestTxn + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*RequestOp) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*RequestOp_RequestRange)(nil), + (*RequestOp_RequestPut)(nil), + (*RequestOp_RequestDeleteRange)(nil), + (*RequestOp_RequestTxn)(nil), + } +} + +type ResponseOp struct { + // response is a union of response types returned by a transaction. + // + // Types that are valid to be assigned to Response: + // *ResponseOp_ResponseRange + // *ResponseOp_ResponsePut + // *ResponseOp_ResponseDeleteRange + // *ResponseOp_ResponseTxn + Response isResponseOp_Response `protobuf_oneof:"response"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseOp) Reset() { *m = ResponseOp{} } +func (m *ResponseOp) String() string { return proto.CompactTextString(m) } +func (*ResponseOp) ProtoMessage() {} +func (*ResponseOp) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{8} +} +func (m *ResponseOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseOp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseOp.Merge(m, src) +} +func (m *ResponseOp) XXX_Size() int { + return m.Size() +} +func (m *ResponseOp) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseOp.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseOp proto.InternalMessageInfo + +type isResponseOp_Response interface { + isResponseOp_Response() + MarshalTo([]byte) (int, error) + Size() int +} + +type ResponseOp_ResponseRange struct { + ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,proto3,oneof" json:"response_range,omitempty"` +} +type ResponseOp_ResponsePut struct { + ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,proto3,oneof" 
json:"response_put,omitempty"` +} +type ResponseOp_ResponseDeleteRange struct { + ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,proto3,oneof" json:"response_delete_range,omitempty"` +} +type ResponseOp_ResponseTxn struct { + ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,proto3,oneof" json:"response_txn,omitempty"` +} + +func (*ResponseOp_ResponseRange) isResponseOp_Response() {} +func (*ResponseOp_ResponsePut) isResponseOp_Response() {} +func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {} +func (*ResponseOp_ResponseTxn) isResponseOp_Response() {} + +func (m *ResponseOp) GetResponse() isResponseOp_Response { + if m != nil { + return m.Response + } + return nil +} + +func (m *ResponseOp) GetResponseRange() *RangeResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok { + return x.ResponseRange + } + return nil +} + +func (m *ResponseOp) GetResponsePut() *PutResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok { + return x.ResponsePut + } + return nil +} + +func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok { + return x.ResponseDeleteRange + } + return nil +} + +func (m *ResponseOp) GetResponseTxn() *TxnResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok { + return x.ResponseTxn + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ResponseOp) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ResponseOp_ResponseRange)(nil), + (*ResponseOp_ResponsePut)(nil), + (*ResponseOp_ResponseDeleteRange)(nil), + (*ResponseOp_ResponseTxn)(nil), + } +} + +type Compare struct { + // result is logical comparison operation for this comparison. + Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"` + // target is the key-value field to inspect for the comparison. + Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"` + // key is the subject key for the comparison operation. + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Types that are valid to be assigned to TargetUnion: + // *Compare_Version + // *Compare_CreateRevision + // *Compare_ModRevision + // *Compare_Value + // *Compare_Lease + TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` + // range_end compares the given target to all keys in the range [key, range_end). + // See RangeRequest for more details on key ranges. 
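+	//
+	// Illustrative sketch, not vendored code: the standard etcd "key absent"
+	// guard asserts that the key's create_revision is still zero:
+	//
+	//	guard := &Compare{
+	//		Result:      Compare_EQUAL,
+	//		Target:      Compare_CREATE,
+	//		Key:         []byte("k"),
+	//		TargetUnion: &Compare_CreateRevision{CreateRevision: 0},
+	//	}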
+ RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Compare) Reset() { *m = Compare{} } +func (m *Compare) String() string { return proto.CompactTextString(m) } +func (*Compare) ProtoMessage() {} +func (*Compare) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{9} +} +func (m *Compare) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Compare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Compare.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Compare) XXX_Merge(src proto.Message) { + xxx_messageInfo_Compare.Merge(m, src) +} +func (m *Compare) XXX_Size() int { + return m.Size() +} +func (m *Compare) XXX_DiscardUnknown() { + xxx_messageInfo_Compare.DiscardUnknown(m) +} + +var xxx_messageInfo_Compare proto.InternalMessageInfo + +type isCompare_TargetUnion interface { + isCompare_TargetUnion() + MarshalTo([]byte) (int, error) + Size() int +} + +type Compare_Version struct { + Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof" json:"version,omitempty"` +} +type Compare_CreateRevision struct { + CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof" json:"create_revision,omitempty"` +} +type Compare_ModRevision struct { + ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof" json:"mod_revision,omitempty"` +} +type Compare_Value struct { + Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof" json:"value,omitempty"` +} +type Compare_Lease struct { + Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof" json:"lease,omitempty"` +} + +func (*Compare_Version) isCompare_TargetUnion() {} +func (*Compare_CreateRevision) isCompare_TargetUnion() {} +func (*Compare_ModRevision) isCompare_TargetUnion() {} +func (*Compare_Value) isCompare_TargetUnion() {} +func (*Compare_Lease) isCompare_TargetUnion() {} + +func (m *Compare) GetTargetUnion() isCompare_TargetUnion { + if m != nil { + return m.TargetUnion + } + return nil +} + +func (m *Compare) GetResult() Compare_CompareResult { + if m != nil { + return m.Result + } + return Compare_EQUAL +} + +func (m *Compare) GetTarget() Compare_CompareTarget { + if m != nil { + return m.Target + } + return Compare_VERSION +} + +func (m *Compare) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Compare) GetVersion() int64 { + if x, ok := m.GetTargetUnion().(*Compare_Version); ok { + return x.Version + } + return 0 +} + +func (m *Compare) GetCreateRevision() int64 { + if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok { + return x.CreateRevision + } + return 0 +} + +func (m *Compare) GetModRevision() int64 { + if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok { + return x.ModRevision + } + return 0 +} + +func (m *Compare) GetValue() []byte { + if x, ok := m.GetTargetUnion().(*Compare_Value); ok { + return x.Value + } + return nil +} + +func (m *Compare) GetLease() int64 { + if x, ok := m.GetTargetUnion().(*Compare_Lease); ok { + return x.Lease + } + return 0 +} + +func (m *Compare) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +// XXX_OneofWrappers is for the internal use 
of the proto package. +func (*Compare) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Compare_Version)(nil), + (*Compare_CreateRevision)(nil), + (*Compare_ModRevision)(nil), + (*Compare_Value)(nil), + (*Compare_Lease)(nil), + } +} + +// From google paxosdb paper: +// Our implementation hinges around a powerful primitive which we call MultiOp. All other database +// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically +// and consists of three components: +// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check +// for the absence or presence of a value, or compare with a given value. Two different tests in the guard +// may apply to the same or different entries in the database. All tests in the guard are applied and +// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise +// it executes f op (see item 3 below). +// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or +// lookup operation, and applies to a single database entry. Two different operations in the list may apply +// to the same or different entries in the database. These operations are executed +// if guard evaluates to +// true. +// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. +type TxnRequest struct { + // compare is a list of predicates representing a conjunction of terms. + // If the comparisons succeed, then the success requests will be processed in order, + // and the response will contain their respective responses in order. + // If the comparisons fail, then the failure requests will be processed in order, + // and the response will contain their respective responses in order. + Compare []*Compare `protobuf:"bytes,1,rep,name=compare,proto3" json:"compare,omitempty"` + // success is a list of requests which will be applied when compare evaluates to true. + Success []*RequestOp `protobuf:"bytes,2,rep,name=success,proto3" json:"success,omitempty"` + // failure is a list of requests which will be applied when compare evaluates to false. 
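+	//
+	// Illustrative sketch, not vendored code: combining the guard from the
+	// Compare example above with success/failure branches gives the classic
+	// create-if-absent transaction:
+	//
+	//	txn := &TxnRequest{
+	//		Compare: []*Compare{guard}, // key "k" does not exist yet
+	//		Success: []*RequestOp{{Request: &RequestOp_RequestPut{
+	//			RequestPut: &PutRequest{Key: []byte("k"), Value: []byte("v")}}}},
+	//		Failure: []*RequestOp{{Request: &RequestOp_RequestRange{
+	//			RequestRange: &RangeRequest{Key: []byte("k")}}}},
+	//	}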
+ Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure,proto3" json:"failure,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TxnRequest) Reset() { *m = TxnRequest{} } +func (m *TxnRequest) String() string { return proto.CompactTextString(m) } +func (*TxnRequest) ProtoMessage() {} +func (*TxnRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{10} +} +func (m *TxnRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TxnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TxnRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TxnRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxnRequest.Merge(m, src) +} +func (m *TxnRequest) XXX_Size() int { + return m.Size() +} +func (m *TxnRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TxnRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TxnRequest proto.InternalMessageInfo + +func (m *TxnRequest) GetCompare() []*Compare { + if m != nil { + return m.Compare + } + return nil +} + +func (m *TxnRequest) GetSuccess() []*RequestOp { + if m != nil { + return m.Success + } + return nil +} + +func (m *TxnRequest) GetFailure() []*RequestOp { + if m != nil { + return m.Failure + } + return nil +} + +type TxnResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // succeeded is set to true if the compare evaluated to true or false otherwise. + Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"` + // responses is a list of responses corresponding to the results from applying + // success if succeeded is true or failure if succeeded is false. + Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses,proto3" json:"responses,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TxnResponse) Reset() { *m = TxnResponse{} } +func (m *TxnResponse) String() string { return proto.CompactTextString(m) } +func (*TxnResponse) ProtoMessage() {} +func (*TxnResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{11} +} +func (m *TxnResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TxnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TxnResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TxnResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxnResponse.Merge(m, src) +} +func (m *TxnResponse) XXX_Size() int { + return m.Size() +} +func (m *TxnResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TxnResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TxnResponse proto.InternalMessageInfo + +func (m *TxnResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TxnResponse) GetSucceeded() bool { + if m != nil { + return m.Succeeded + } + return false +} + +func (m *TxnResponse) GetResponses() []*ResponseOp { + if m != nil { + return m.Responses + } + return nil +} + +// CompactionRequest compacts the key-value store up to a given revision. 
All superseded keys +// with a revision less than the compaction revision will be removed. +type CompactionRequest struct { + // revision is the key-value store revision for the compaction operation. + Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` + // physical is set so the RPC will wait until the compaction is physically + // applied to the local database such that compacted entries are totally + // removed from the backend database. + Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } +func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } +func (*CompactionRequest) ProtoMessage() {} +func (*CompactionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{12} +} +func (m *CompactionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompactionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CompactionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompactionRequest.Merge(m, src) +} +func (m *CompactionRequest) XXX_Size() int { + return m.Size() +} +func (m *CompactionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CompactionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CompactionRequest proto.InternalMessageInfo + +func (m *CompactionRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *CompactionRequest) GetPhysical() bool { + if m != nil { + return m.Physical + } + return false +} + +type CompactionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } +func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } +func (*CompactionResponse) ProtoMessage() {} +func (*CompactionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{13} +} +func (m *CompactionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompactionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CompactionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompactionResponse.Merge(m, src) +} +func (m *CompactionResponse) XXX_Size() int { + return m.Size() +} +func (m *CompactionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CompactionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CompactionResponse proto.InternalMessageInfo + +func (m *CompactionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type HashRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *HashRequest) Reset() { *m = HashRequest{} } +func (m *HashRequest) String() string { return proto.CompactTextString(m) } +func (*HashRequest) ProtoMessage() {} +func (*HashRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{14} +} +func (m *HashRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HashRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HashRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HashRequest.Merge(m, src) +} +func (m *HashRequest) XXX_Size() int { + return m.Size() +} +func (m *HashRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HashRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HashRequest proto.InternalMessageInfo + +type HashKVRequest struct { + // revision is the key-value store revision for the hash operation. + Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HashKVRequest) Reset() { *m = HashKVRequest{} } +func (m *HashKVRequest) String() string { return proto.CompactTextString(m) } +func (*HashKVRequest) ProtoMessage() {} +func (*HashKVRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{15} +} +func (m *HashKVRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HashKVRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HashKVRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HashKVRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HashKVRequest.Merge(m, src) +} +func (m *HashKVRequest) XXX_Size() int { + return m.Size() +} +func (m *HashKVRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HashKVRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HashKVRequest proto.InternalMessageInfo + +func (m *HashKVRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +type HashKVResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // hash is the hash value computed from the responding member's MVCC keys up to a given revision. + Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` + // compact_revision is the compacted revision of key-value store when hash begins. 
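+	//
+	// Illustrative note, not vendored code (maintA/maintB, epA/epB, and rev are
+	// assumed to exist): HashKV underlies etcd's corruption check, and hashes
+	// from two members are only comparable at the same revision and above the
+	// same compact_revision:
+	//
+	//	ha, _ := maintA.HashKV(ctx, epA, rev)
+	//	hb, _ := maintB.HashKV(ctx, epB, rev)
+	//	mismatch := ha.Hash != hb.Hash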
+ CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HashKVResponse) Reset() { *m = HashKVResponse{} } +func (m *HashKVResponse) String() string { return proto.CompactTextString(m) } +func (*HashKVResponse) ProtoMessage() {} +func (*HashKVResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{16} +} +func (m *HashKVResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HashKVResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HashKVResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HashKVResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HashKVResponse.Merge(m, src) +} +func (m *HashKVResponse) XXX_Size() int { + return m.Size() +} +func (m *HashKVResponse) XXX_DiscardUnknown() { + xxx_messageInfo_HashKVResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_HashKVResponse proto.InternalMessageInfo + +func (m *HashKVResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *HashKVResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + +func (m *HashKVResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +type HashResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // hash is the hash value computed from the responding member's KV's backend. 
+ Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HashResponse) Reset() { *m = HashResponse{} } +func (m *HashResponse) String() string { return proto.CompactTextString(m) } +func (*HashResponse) ProtoMessage() {} +func (*HashResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{17} +} +func (m *HashResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HashResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HashResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HashResponse.Merge(m, src) +} +func (m *HashResponse) XXX_Size() int { + return m.Size() +} +func (m *HashResponse) XXX_DiscardUnknown() { + xxx_messageInfo_HashResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_HashResponse proto.InternalMessageInfo + +func (m *HashResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *HashResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + +type SnapshotRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } +func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*SnapshotRequest) ProtoMessage() {} +func (*SnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{18} +} +func (m *SnapshotRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotRequest.Merge(m, src) +} +func (m *SnapshotRequest) XXX_Size() int { + return m.Size() +} +func (m *SnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotRequest proto.InternalMessageInfo + +type SnapshotResponse struct { + // header has the current key-value store information. The first header in the snapshot + // stream indicates the point in time of the snapshot. + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // remaining_bytes is the number of blob bytes to be sent after this message + RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"` + // blob contains the next chunk of the snapshot in the snapshot stream. 
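+	//
+	// Illustrative sketch, not vendored code (assumes a snapshot stream client
+	// and an open file f): a consumer appends blobs until the stream ends,
+	// treating remaining_bytes only as a progress hint:
+	//
+	//	for {
+	//		resp, err := stream.Recv()
+	//		if err == io.EOF {
+	//			break
+	//		}
+	//		if err != nil {
+	//			return err
+	//		}
+	//		if _, err := f.Write(resp.Blob); err != nil {
+	//			return err
+	//		}
+	//	}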
+ Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } +func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*SnapshotResponse) ProtoMessage() {} +func (*SnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{19} +} +func (m *SnapshotResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotResponse.Merge(m, src) +} +func (m *SnapshotResponse) XXX_Size() int { + return m.Size() +} +func (m *SnapshotResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotResponse proto.InternalMessageInfo + +func (m *SnapshotResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SnapshotResponse) GetRemainingBytes() uint64 { + if m != nil { + return m.RemainingBytes + } + return 0 +} + +func (m *SnapshotResponse) GetBlob() []byte { + if m != nil { + return m.Blob + } + return nil +} + +type WatchRequest struct { + // request_union is a request to either create a new watcher or cancel an existing watcher. + // + // Types that are valid to be assigned to RequestUnion: + // *WatchRequest_CreateRequest + // *WatchRequest_CancelRequest + // *WatchRequest_ProgressRequest + RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WatchRequest) Reset() { *m = WatchRequest{} } +func (m *WatchRequest) String() string { return proto.CompactTextString(m) } +func (*WatchRequest) ProtoMessage() {} +func (*WatchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{20} +} +func (m *WatchRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WatchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchRequest.Merge(m, src) +} +func (m *WatchRequest) XXX_Size() int { + return m.Size() +} +func (m *WatchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WatchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchRequest proto.InternalMessageInfo + +type isWatchRequest_RequestUnion interface { + isWatchRequest_RequestUnion() + MarshalTo([]byte) (int, error) + Size() int +} + +type WatchRequest_CreateRequest struct { + CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,proto3,oneof" json:"create_request,omitempty"` +} +type WatchRequest_CancelRequest struct { + CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3,oneof" 
`json:"cancel_request,omitempty"` +} +type WatchRequest_ProgressRequest struct { + ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,proto3,oneof" json:"progress_request,omitempty"` +} + +func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {} +func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {} +func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {} + +func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion { + if m != nil { + return m.RequestUnion + } + return nil +} + +func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest { + if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok { + return x.CreateRequest + } + return nil +} + +func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest { + if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok { + return x.CancelRequest + } + return nil +} + +func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest { + if x, ok := m.GetRequestUnion().(*WatchRequest_ProgressRequest); ok { + return x.ProgressRequest + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*WatchRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*WatchRequest_CreateRequest)(nil), + (*WatchRequest_CancelRequest)(nil), + (*WatchRequest_ProgressRequest)(nil), + } +} + +type WatchCreateRequest struct { + // key is the key to register for watching. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the end of the range [key, range_end) to watch. If range_end is not given, + // only the key argument is watched. If range_end is equal to '\0', all keys greater than + // or equal to the key argument are watched. + // If the range_end is one bit larger than the given key, + // then all keys with the prefix (the given key) will be watched. + RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". + StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"` + // progress_notify is set so that the etcd server will periodically send a WatchResponse with + // no events to the new watcher if there are no recent events. It is useful when clients + // wish to recover a disconnected watcher starting from a recent known revision. + // The etcd server may decide how often it will send notifications based on current load. + ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"` + // filters filter the events on the server side before they are sent back to the watcher. + Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,proto3,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"` + // If prev_kv is set, created watcher gets the previous KV before the event happens. + // If the previous KV is already compacted, nothing will be returned. + PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + // If watch_id is provided and non-zero, it will be assigned to this watcher. + // Since creating a watcher in etcd is not a synchronous operation, + // this can be used to ensure that ordering is correct when creating multiple + // watchers on the same stream.
Creating a watcher with an ID already in + // use on the stream will cause an error to be returned. + WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` + // fragment enables splitting large revisions into multiple watch responses. + Fragment bool `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} } +func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) } +func (*WatchCreateRequest) ProtoMessage() {} +func (*WatchCreateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{21} +} +func (m *WatchCreateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WatchCreateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WatchCreateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchCreateRequest.Merge(m, src) +} +func (m *WatchCreateRequest) XXX_Size() int { + return m.Size() +} +func (m *WatchCreateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WatchCreateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchCreateRequest proto.InternalMessageInfo + +func (m *WatchCreateRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *WatchCreateRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *WatchCreateRequest) GetStartRevision() int64 { + if m != nil { + return m.StartRevision + } + return 0 +} + +func (m *WatchCreateRequest) GetProgressNotify() bool { + if m != nil { + return m.ProgressNotify + } + return false +} + +func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType { + if m != nil { + return m.Filters + } + return nil +} + +func (m *WatchCreateRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +func (m *WatchCreateRequest) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchCreateRequest) GetFragment() bool { + if m != nil { + return m.Fragment + } + return false +} + +type WatchCancelRequest struct { + // watch_id is the watcher id to cancel so that no more events are transmitted. 
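+ //
+ // A minimal illustrative sketch, not part of the generated code: per the
+ // WatchCreateRequest documentation above, watching a prefix means setting
+ // range_end to the key with its last byte incremented, and the watcher can
+ // later be cancelled by echoing the server-assigned watch ID back (watchID
+ // below is a hypothetical variable holding that ID):
+ //
+ //	create := &WatchRequest{RequestUnion: &WatchRequest_CreateRequest{
+ //		CreateRequest: &WatchCreateRequest{
+ //			Key:      []byte("foo/"),
+ //			RangeEnd: []byte("foo0"), // '/'+1 == '0', so this covers the "foo/" prefix
+ //		},
+ //	}}
+ //	cancel := &WatchRequest{RequestUnion: &WatchRequest_CancelRequest{
+ //		CancelRequest: &WatchCancelRequest{WatchId: watchID},
+ //	}}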
+ WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} } +func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) } +func (*WatchCancelRequest) ProtoMessage() {} +func (*WatchCancelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{22} +} +func (m *WatchCancelRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchCancelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WatchCancelRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WatchCancelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchCancelRequest.Merge(m, src) +} +func (m *WatchCancelRequest) XXX_Size() int { + return m.Size() +} +func (m *WatchCancelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WatchCancelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchCancelRequest proto.InternalMessageInfo + +func (m *WatchCancelRequest) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +// Requests that a watch stream progress status be sent in the watch response stream as soon as +// possible. +type WatchProgressRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WatchProgressRequest) Reset() { *m = WatchProgressRequest{} } +func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) } +func (*WatchProgressRequest) ProtoMessage() {} +func (*WatchProgressRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{23} +} +func (m *WatchProgressRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WatchProgressRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WatchProgressRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchProgressRequest.Merge(m, src) +} +func (m *WatchProgressRequest) XXX_Size() int { + return m.Size() +} +func (m *WatchProgressRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WatchProgressRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchProgressRequest proto.InternalMessageInfo + +type WatchResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // watch_id is the ID of the watcher that corresponds to the response. + WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` + // created is set to true if the response is for a create watch request. + // The client should record the watch_id and expect to receive events for + // the created watcher from the same stream. + // All events sent to the created watcher will be attached to the same watch_id. + Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` + // canceled is set to true if the response is for a cancel watch request.
+ // No further events will be sent to the canceled watcher. + Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"` + // compact_revision is set to the minimum index if a watcher tries to watch + // at a compacted index. + // + // This happens when creating a watcher at a compacted revision or when the watcher cannot + // catch up with the progress of the key-value store. + // + // The client should treat the watcher as canceled and should not try to create any + // watcher with the same start_revision again. + CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + // cancel_reason indicates the reason for canceling the watcher. + CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` + // fragment is true if a large watch response was split over multiple responses. + Fragment bool `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"` + Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WatchResponse) Reset() { *m = WatchResponse{} } +func (m *WatchResponse) String() string { return proto.CompactTextString(m) } +func (*WatchResponse) ProtoMessage() {} +func (*WatchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{24} +} +func (m *WatchResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WatchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchResponse.Merge(m, src) +} +func (m *WatchResponse) XXX_Size() int { + return m.Size() +} +func (m *WatchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WatchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchResponse proto.InternalMessageInfo + +func (m *WatchResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *WatchResponse) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchResponse) GetCreated() bool { + if m != nil { + return m.Created + } + return false +} + +func (m *WatchResponse) GetCanceled() bool { + if m != nil { + return m.Canceled + } + return false +} + +func (m *WatchResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +func (m *WatchResponse) GetCancelReason() string { + if m != nil { + return m.CancelReason + } + return "" +} + +func (m *WatchResponse) GetFragment() bool { + if m != nil { + return m.Fragment + } + return false +} + +func (m *WatchResponse) GetEvents() []*mvccpb.Event { + if m != nil { + return m.Events + } + return nil +} + +type LeaseGrantRequest struct { + // TTL is the advisory time-to-live in seconds. An expired lease will return -1. + TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` + // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
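+ //
+ // A minimal illustrative sketch, not part of the generated code: requesting
+ // a 10-second lease and letting the lessor pick the lease ID, using only the
+ // types defined in this file:
+ //
+ //	req := &LeaseGrantRequest{TTL: 10, ID: 0} // ID 0: the lessor chooses an ID
+ //	// The LeaseGrantResponse then carries the granted ID and the
+ //	// server-chosen TTL.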
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } +func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantRequest) ProtoMessage() {} +func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{25} +} +func (m *LeaseGrantRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseGrantRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseGrantRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseGrantRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseGrantRequest.Merge(m, src) +} +func (m *LeaseGrantRequest) XXX_Size() int { + return m.Size() +} +func (m *LeaseGrantRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseGrantRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseGrantRequest proto.InternalMessageInfo + +func (m *LeaseGrantRequest) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseGrantResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // ID is the lease ID for the granted lease. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the server chosen lease time-to-live in seconds. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } +func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantResponse) ProtoMessage() {} +func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{26} +} +func (m *LeaseGrantResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseGrantResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseGrantResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseGrantResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseGrantResponse.Merge(m, src) +} +func (m *LeaseGrantResponse) XXX_Size() int { + return m.Size() +} +func (m *LeaseGrantResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseGrantResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseGrantResponse proto.InternalMessageInfo + +func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseGrantResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseGrantResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type LeaseRevokeRequest 
struct { + // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } +func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeRequest) ProtoMessage() {} +func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{27} +} +func (m *LeaseRevokeRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseRevokeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseRevokeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseRevokeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseRevokeRequest.Merge(m, src) +} +func (m *LeaseRevokeRequest) XXX_Size() int { + return m.Size() +} +func (m *LeaseRevokeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseRevokeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseRevokeRequest proto.InternalMessageInfo + +func (m *LeaseRevokeRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseRevokeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } +func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeResponse) ProtoMessage() {} +func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{28} +} +func (m *LeaseRevokeResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseRevokeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseRevokeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseRevokeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseRevokeResponse.Merge(m, src) +} +func (m *LeaseRevokeResponse) XXX_Size() int { + return m.Size() +} +func (m *LeaseRevokeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseRevokeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseRevokeResponse proto.InternalMessageInfo + +func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type LeaseCheckpoint struct { + // ID is the lease ID to checkpoint. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Remaining_TTL is the remaining time until expiry of the lease. 
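+ //
+ // A minimal illustrative sketch, not part of the generated code: checkpoints
+ // are batched into the LeaseCheckpointRequest defined below, so the remaining
+ // TTLs of several leases can be recorded in one request (leaseID is a
+ // hypothetical variable):
+ //
+ //	req := &LeaseCheckpointRequest{Checkpoints: []*LeaseCheckpoint{
+ //		{ID: leaseID, Remaining_TTL: 30},
+ //	}}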
+ Remaining_TTL int64 `protobuf:"varint,2,opt,name=remaining_TTL,json=remainingTTL,proto3" json:"remaining_TTL,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseCheckpoint) Reset() { *m = LeaseCheckpoint{} } +func (m *LeaseCheckpoint) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpoint) ProtoMessage() {} +func (*LeaseCheckpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{29} +} +func (m *LeaseCheckpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCheckpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseCheckpoint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseCheckpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCheckpoint.Merge(m, src) +} +func (m *LeaseCheckpoint) XXX_Size() int { + return m.Size() +} +func (m *LeaseCheckpoint) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCheckpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCheckpoint proto.InternalMessageInfo + +func (m *LeaseCheckpoint) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseCheckpoint) GetRemaining_TTL() int64 { + if m != nil { + return m.Remaining_TTL + } + return 0 +} + +type LeaseCheckpointRequest struct { + Checkpoints []*LeaseCheckpoint `protobuf:"bytes,1,rep,name=checkpoints,proto3" json:"checkpoints,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseCheckpointRequest) Reset() { *m = LeaseCheckpointRequest{} } +func (m *LeaseCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpointRequest) ProtoMessage() {} +func (*LeaseCheckpointRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{30} +} +func (m *LeaseCheckpointRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCheckpointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseCheckpointRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseCheckpointRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCheckpointRequest.Merge(m, src) +} +func (m *LeaseCheckpointRequest) XXX_Size() int { + return m.Size() +} +func (m *LeaseCheckpointRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCheckpointRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCheckpointRequest proto.InternalMessageInfo + +func (m *LeaseCheckpointRequest) GetCheckpoints() []*LeaseCheckpoint { + if m != nil { + return m.Checkpoints + } + return nil +} + +type LeaseCheckpointResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseCheckpointResponse) Reset() { *m = LeaseCheckpointResponse{} } +func (m *LeaseCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpointResponse) ProtoMessage() {} +func (*LeaseCheckpointResponse) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_77a6da22d6a3feb1, []int{31} +} +func (m *LeaseCheckpointResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCheckpointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseCheckpointResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseCheckpointResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCheckpointResponse.Merge(m, src) +} +func (m *LeaseCheckpointResponse) XXX_Size() int { + return m.Size() +} +func (m *LeaseCheckpointResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCheckpointResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCheckpointResponse proto.InternalMessageInfo + +func (m *LeaseCheckpointResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type LeaseKeepAliveRequest struct { + // ID is the lease ID for the lease to keep alive. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } +func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveRequest) ProtoMessage() {} +func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{32} +} +func (m *LeaseKeepAliveRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseKeepAliveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseKeepAliveRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseKeepAliveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseKeepAliveRequest.Merge(m, src) +} +func (m *LeaseKeepAliveRequest) XXX_Size() int { + return m.Size() +} +func (m *LeaseKeepAliveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseKeepAliveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseKeepAliveRequest proto.InternalMessageInfo + +func (m *LeaseKeepAliveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseKeepAliveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // ID is the lease ID from the keep alive request. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the new time-to-live for the lease. 
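+ //
+ // A minimal illustrative sketch, not part of the generated code: keep-alives
+ // flow over a bidirectional stream, with the client sending the lease ID and
+ // reading back the refreshed TTL (this assumes the generated keep-alive
+ // stream client with Send/Recv methods, and a hypothetical leaseID variable):
+ //
+ //	if err := stream.Send(&LeaseKeepAliveRequest{ID: leaseID}); err != nil {
+ //		// handle send error
+ //	}
+ //	resp, err := stream.Recv() // resp.TTL is the refreshed time-to-live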
+ TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } +func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveResponse) ProtoMessage() {} +func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{33} +} +func (m *LeaseKeepAliveResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseKeepAliveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseKeepAliveResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseKeepAliveResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseKeepAliveResponse.Merge(m, src) +} +func (m *LeaseKeepAliveResponse) XXX_Size() int { + return m.Size() +} +func (m *LeaseKeepAliveResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseKeepAliveResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseKeepAliveResponse proto.InternalMessageInfo + +func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseKeepAliveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseKeepAliveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +type LeaseTimeToLiveRequest struct { + // ID is the lease ID for the lease. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // keys is true to query all the keys attached to this lease. 
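+ //
+ // A minimal illustrative sketch, not part of the generated code: querying
+ // the remaining TTL of a lease together with the keys attached to it
+ // (leaseID is a hypothetical variable):
+ //
+ //	req := &LeaseTimeToLiveRequest{ID: leaseID, Keys: true}
+ //	// The LeaseTimeToLiveResponse then reports TTL, GrantedTTL, and Keys.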
+ Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } +func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveRequest) ProtoMessage() {} +func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{34} +} +func (m *LeaseTimeToLiveRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseTimeToLiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseTimeToLiveRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseTimeToLiveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseTimeToLiveRequest.Merge(m, src) +} +func (m *LeaseTimeToLiveRequest) XXX_Size() int { + return m.Size() +} +func (m *LeaseTimeToLiveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseTimeToLiveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseTimeToLiveRequest proto.InternalMessageInfo + +func (m *LeaseTimeToLiveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveRequest) GetKeys() bool { + if m != nil { + return m.Keys + } + return false +} + +type LeaseTimeToLiveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // ID is the lease ID from the keep alive request. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"` + // Keys is the list of keys attached to this lease. 
+ Keys [][]byte `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } +func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveResponse) ProtoMessage() {} +func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{35} +} +func (m *LeaseTimeToLiveResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseTimeToLiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseTimeToLiveResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseTimeToLiveResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseTimeToLiveResponse.Merge(m, src) +} +func (m *LeaseTimeToLiveResponse) XXX_Size() int { + return m.Size() +} +func (m *LeaseTimeToLiveResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseTimeToLiveResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseTimeToLiveResponse proto.InternalMessageInfo + +func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseTimeToLiveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { + if m != nil { + return m.GrantedTTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { + if m != nil { + return m.Keys + } + return nil +} + +type LeaseLeasesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} } +func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesRequest) ProtoMessage() {} +func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{36} +} +func (m *LeaseLeasesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseLeasesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseLeasesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseLeasesRequest.Merge(m, src) +} +func (m *LeaseLeasesRequest) XXX_Size() int { + return m.Size() +} +func (m *LeaseLeasesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseLeasesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseLeasesRequest proto.InternalMessageInfo + +type LeaseStatus struct { + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseStatus) Reset() { *m = LeaseStatus{} } +func (m *LeaseStatus) String() string { return proto.CompactTextString(m) } +func (*LeaseStatus) ProtoMessage() {} +func (*LeaseStatus) 
Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{37} +} +func (m *LeaseStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseStatus.Merge(m, src) +} +func (m *LeaseStatus) XXX_Size() int { + return m.Size() +} +func (m *LeaseStatus) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseStatus proto.InternalMessageInfo + +func (m *LeaseStatus) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseLeasesResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} } +func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesResponse) ProtoMessage() {} +func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{38} +} +func (m *LeaseLeasesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseLeasesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseLeasesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseLeasesResponse.Merge(m, src) +} +func (m *LeaseLeasesResponse) XXX_Size() int { + return m.Size() +} +func (m *LeaseLeasesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseLeasesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseLeasesResponse proto.InternalMessageInfo + +func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus { + if m != nil { + return m.Leases + } + return nil +} + +type Member struct { + // ID is the member ID for this member. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // name is the human-readable name of the member. If the member is not started, the name will be an empty string. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // peerURLs is the list of URLs the member exposes to the cluster for communication. + PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` + // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. + ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"` + // isLearner indicates if the member is a raft learner.
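+ //
+ // A minimal illustrative sketch, not part of the generated code: as
+ // documented above, a member that has been added but not started reports an
+ // empty name and no client URLs (listResp stands for a MemberListResponse):
+ //
+ //	for _, mem := range listResp.GetMembers() {
+ //		if mem.GetName() == "" && len(mem.GetClientURLs()) == 0 {
+ //			// member has not started yet
+ //		}
+ //	}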
+ IsLearner bool `protobuf:"varint,5,opt,name=isLearner,proto3" json:"isLearner,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{39} +} +func (m *Member) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Member.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Member) XXX_Merge(src proto.Message) { + xxx_messageInfo_Member.Merge(m, src) +} +func (m *Member) XXX_Size() int { + return m.Size() +} +func (m *Member) XXX_DiscardUnknown() { + xxx_messageInfo_Member.DiscardUnknown(m) +} + +var xxx_messageInfo_Member proto.InternalMessageInfo + +func (m *Member) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *Member) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Member) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *Member) GetClientURLs() []string { + if m != nil { + return m.ClientURLs + } + return nil +} + +func (m *Member) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + +type MemberAddRequest struct { + // peerURLs is the list of URLs the added member will use to communicate with the cluster. + PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` + // isLearner indicates if the added member is a raft learner.
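+ //
+ // A minimal illustrative sketch, not part of the generated code: adding a
+ // member as a raft learner first lets it catch up before it is promoted to
+ // a voting member (see MemberPromoteRequest below; the peer URL is
+ // hypothetical):
+ //
+ //	req := &MemberAddRequest{
+ //		PeerURLs:  []string{"http://10.0.0.4:2380"},
+ //		IsLearner: true,
+ //	}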
+ IsLearner bool `protobuf:"varint,2,opt,name=isLearner,proto3" json:"isLearner,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } +func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } +func (*MemberAddRequest) ProtoMessage() {} +func (*MemberAddRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{40} +} +func (m *MemberAddRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemberAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemberAddRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemberAddRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemberAddRequest.Merge(m, src) +} +func (m *MemberAddRequest) XXX_Size() int { + return m.Size() +} +func (m *MemberAddRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MemberAddRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MemberAddRequest proto.InternalMessageInfo + +func (m *MemberAddRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *MemberAddRequest) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + +type MemberAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // member is the member information for the added member. + Member *Member `protobuf:"bytes,2,opt,name=member,proto3" json:"member,omitempty"` + // members is a list of all members after adding the new member. + Members []*Member `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } +func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } +func (*MemberAddResponse) ProtoMessage() {} +func (*MemberAddResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{41} +} +func (m *MemberAddResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemberAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemberAddResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemberAddResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemberAddResponse.Merge(m, src) +} +func (m *MemberAddResponse) XXX_Size() int { + return m.Size() +} +func (m *MemberAddResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MemberAddResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MemberAddResponse proto.InternalMessageInfo + +func (m *MemberAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberAddResponse) GetMember() *Member { + if m != nil { + return m.Member + } + return nil +} + +func (m *MemberAddResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberRemoveRequest struct { + // ID is the member ID of the member to remove. 
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } +func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveRequest) ProtoMessage() {} +func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{42} +} +func (m *MemberRemoveRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemberRemoveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemberRemoveRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemberRemoveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemberRemoveRequest.Merge(m, src) +} +func (m *MemberRemoveRequest) XXX_Size() int { + return m.Size() +} +func (m *MemberRemoveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MemberRemoveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MemberRemoveRequest proto.InternalMessageInfo + +func (m *MemberRemoveRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +type MemberRemoveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // members is a list of all members after removing the member. + Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } +func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveResponse) ProtoMessage() {} +func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{43} +} +func (m *MemberRemoveResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemberRemoveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemberRemoveResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemberRemoveResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemberRemoveResponse.Merge(m, src) +} +func (m *MemberRemoveResponse) XXX_Size() int { + return m.Size() +} +func (m *MemberRemoveResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MemberRemoveResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MemberRemoveResponse proto.InternalMessageInfo + +func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberRemoveResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberUpdateRequest struct { + // ID is the member ID of the member to update. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // peerURLs is the new list of URLs the member will use to communicate with the cluster. 
+ PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } +func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateRequest) ProtoMessage() {} +func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{44} +} +func (m *MemberUpdateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemberUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemberUpdateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemberUpdateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemberUpdateRequest.Merge(m, src) +} +func (m *MemberUpdateRequest) XXX_Size() int { + return m.Size() +} +func (m *MemberUpdateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MemberUpdateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MemberUpdateRequest proto.InternalMessageInfo + +func (m *MemberUpdateRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *MemberUpdateRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +type MemberUpdateResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // members is a list of all members after updating the member. + Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } +func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateResponse) ProtoMessage() {} +func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{45} +} +func (m *MemberUpdateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemberUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemberUpdateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemberUpdateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemberUpdateResponse.Merge(m, src) +} +func (m *MemberUpdateResponse) XXX_Size() int { + return m.Size() +} +func (m *MemberUpdateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MemberUpdateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MemberUpdateResponse proto.InternalMessageInfo + +func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberUpdateResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberListRequest struct { + Linearizable bool `protobuf:"varint,1,opt,name=linearizable,proto3" json:"linearizable,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberListRequest) Reset() { 
*m = MemberListRequest{} } +func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } +func (*MemberListRequest) ProtoMessage() {} +func (*MemberListRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{46} +} +func (m *MemberListRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemberListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemberListRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemberListRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemberListRequest.Merge(m, src) +} +func (m *MemberListRequest) XXX_Size() int { + return m.Size() +} +func (m *MemberListRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MemberListRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MemberListRequest proto.InternalMessageInfo + +func (m *MemberListRequest) GetLinearizable() bool { + if m != nil { + return m.Linearizable + } + return false +} + +type MemberListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // members is a list of all members associated with the cluster. + Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } +func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } +func (*MemberListResponse) ProtoMessage() {} +func (*MemberListResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{47} +} +func (m *MemberListResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemberListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemberListResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemberListResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemberListResponse.Merge(m, src) +} +func (m *MemberListResponse) XXX_Size() int { + return m.Size() +} +func (m *MemberListResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MemberListResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MemberListResponse proto.InternalMessageInfo + +func (m *MemberListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberListResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberPromoteRequest struct { + // ID is the member ID of the member to promote. 
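+ //
+ // A minimal illustrative sketch, not part of the generated code: promoting a
+ // learner to a voting member once it has caught up (learnerID is a
+ // hypothetical variable holding the learner's member ID):
+ //
+ //	req := &MemberPromoteRequest{ID: learnerID}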
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberPromoteRequest) Reset() { *m = MemberPromoteRequest{} } +func (m *MemberPromoteRequest) String() string { return proto.CompactTextString(m) } +func (*MemberPromoteRequest) ProtoMessage() {} +func (*MemberPromoteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{48} +} +func (m *MemberPromoteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemberPromoteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemberPromoteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemberPromoteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemberPromoteRequest.Merge(m, src) +} +func (m *MemberPromoteRequest) XXX_Size() int { + return m.Size() +} +func (m *MemberPromoteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MemberPromoteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MemberPromoteRequest proto.InternalMessageInfo + +func (m *MemberPromoteRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +type MemberPromoteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // members is a list of all members after promoting the member. + Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberPromoteResponse) Reset() { *m = MemberPromoteResponse{} } +func (m *MemberPromoteResponse) String() string { return proto.CompactTextString(m) } +func (*MemberPromoteResponse) ProtoMessage() {} +func (*MemberPromoteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{49} +} +func (m *MemberPromoteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemberPromoteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemberPromoteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemberPromoteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemberPromoteResponse.Merge(m, src) +} +func (m *MemberPromoteResponse) XXX_Size() int { + return m.Size() +} +func (m *MemberPromoteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MemberPromoteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MemberPromoteResponse proto.InternalMessageInfo + +func (m *MemberPromoteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberPromoteResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type DefragmentRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } +func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } +func (*DefragmentRequest) ProtoMessage() {} +func (*DefragmentRequest) Descriptor() 
([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{50} +} +func (m *DefragmentRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DefragmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DefragmentRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DefragmentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DefragmentRequest.Merge(m, src) +} +func (m *DefragmentRequest) XXX_Size() int { + return m.Size() +} +func (m *DefragmentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DefragmentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DefragmentRequest proto.InternalMessageInfo + +type DefragmentResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } +func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } +func (*DefragmentResponse) ProtoMessage() {} +func (*DefragmentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{51} +} +func (m *DefragmentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DefragmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DefragmentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DefragmentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DefragmentResponse.Merge(m, src) +} +func (m *DefragmentResponse) XXX_Size() int { + return m.Size() +} +func (m *DefragmentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DefragmentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DefragmentResponse proto.InternalMessageInfo + +func (m *DefragmentResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type MoveLeaderRequest struct { + // targetID is the node ID for the new leader. 
+ TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } +func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderRequest) ProtoMessage() {} +func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{52} +} +func (m *MoveLeaderRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MoveLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MoveLeaderRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MoveLeaderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MoveLeaderRequest.Merge(m, src) +} +func (m *MoveLeaderRequest) XXX_Size() int { + return m.Size() +} +func (m *MoveLeaderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MoveLeaderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MoveLeaderRequest proto.InternalMessageInfo + +func (m *MoveLeaderRequest) GetTargetID() uint64 { + if m != nil { + return m.TargetID + } + return 0 +} + +type MoveLeaderResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } +func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderResponse) ProtoMessage() {} +func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{53} +} +func (m *MoveLeaderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MoveLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MoveLeaderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MoveLeaderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MoveLeaderResponse.Merge(m, src) +} +func (m *MoveLeaderResponse) XXX_Size() int { + return m.Size() +} +func (m *MoveLeaderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MoveLeaderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MoveLeaderResponse proto.InternalMessageInfo + +func (m *MoveLeaderResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AlarmRequest struct { + // action is the kind of alarm request to issue. The action + // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a + // raised alarm. + Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"` + // memberID is the ID of the member associated with the alarm. If memberID is 0, the + // alarm request covers all members. + MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"` + // alarm is the type of alarm to consider for this request. 
+ Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } +func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } +func (*AlarmRequest) ProtoMessage() {} +func (*AlarmRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{54} +} +func (m *AlarmRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AlarmRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AlarmRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlarmRequest.Merge(m, src) +} +func (m *AlarmRequest) XXX_Size() int { + return m.Size() +} +func (m *AlarmRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AlarmRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AlarmRequest proto.InternalMessageInfo + +func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { + if m != nil { + return m.Action + } + return AlarmRequest_GET +} + +func (m *AlarmRequest) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmRequest) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + +type AlarmMember struct { + // memberID is the ID of the member associated with the raised alarm. + MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` + // alarm is the type of alarm which has been raised. + Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlarmMember) Reset() { *m = AlarmMember{} } +func (m *AlarmMember) String() string { return proto.CompactTextString(m) } +func (*AlarmMember) ProtoMessage() {} +func (*AlarmMember) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{55} +} +func (m *AlarmMember) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AlarmMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AlarmMember.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AlarmMember) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlarmMember.Merge(m, src) +} +func (m *AlarmMember) XXX_Size() int { + return m.Size() +} +func (m *AlarmMember) XXX_DiscardUnknown() { + xxx_messageInfo_AlarmMember.DiscardUnknown(m) +} + +var xxx_messageInfo_AlarmMember proto.InternalMessageInfo + +func (m *AlarmMember) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmMember) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + +type AlarmResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // alarms is a list of alarms associated with the alarm request. 
+ Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms,proto3" json:"alarms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } +func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } +func (*AlarmResponse) ProtoMessage() {} +func (*AlarmResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{56} +} +func (m *AlarmResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AlarmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AlarmResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AlarmResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlarmResponse.Merge(m, src) +} +func (m *AlarmResponse) XXX_Size() int { + return m.Size() +} +func (m *AlarmResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AlarmResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AlarmResponse proto.InternalMessageInfo + +func (m *AlarmResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AlarmResponse) GetAlarms() []*AlarmMember { + if m != nil { + return m.Alarms + } + return nil +} + +type DowngradeRequest struct { + // action is the kind of downgrade request to issue. The action may + // VALIDATE the target version, DOWNGRADE the cluster version, + // or CANCEL the current downgrading job. + Action DowngradeRequest_DowngradeAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.DowngradeRequest_DowngradeAction" json:"action,omitempty"` + // version is the target version to downgrade. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DowngradeRequest) Reset() { *m = DowngradeRequest{} } +func (m *DowngradeRequest) String() string { return proto.CompactTextString(m) } +func (*DowngradeRequest) ProtoMessage() {} +func (*DowngradeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{57} +} +func (m *DowngradeRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DowngradeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DowngradeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DowngradeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DowngradeRequest.Merge(m, src) +} +func (m *DowngradeRequest) XXX_Size() int { + return m.Size() +} +func (m *DowngradeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DowngradeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DowngradeRequest proto.InternalMessageInfo + +func (m *DowngradeRequest) GetAction() DowngradeRequest_DowngradeAction { + if m != nil { + return m.Action + } + return DowngradeRequest_VALIDATE +} + +func (m *DowngradeRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type DowngradeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // version is the current cluster version. 
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DowngradeResponse) Reset() { *m = DowngradeResponse{} } +func (m *DowngradeResponse) String() string { return proto.CompactTextString(m) } +func (*DowngradeResponse) ProtoMessage() {} +func (*DowngradeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{58} +} +func (m *DowngradeResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DowngradeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DowngradeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DowngradeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DowngradeResponse.Merge(m, src) +} +func (m *DowngradeResponse) XXX_Size() int { + return m.Size() +} +func (m *DowngradeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DowngradeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DowngradeResponse proto.InternalMessageInfo + +func (m *DowngradeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DowngradeResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type StatusRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{59} +} +func (m *StatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusRequest.Merge(m, src) +} +func (m *StatusRequest) XXX_Size() int { + return m.Size() +} +func (m *StatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusRequest proto.InternalMessageInfo + +type StatusResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // version is the cluster protocol version used by the responding member. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. + DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"` + // leader is the member ID which the responding member believes is the current leader. + Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"` + // raftIndex is the current raft committed index of the responding member. + RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"` + // raftTerm is the current raft term of the responding member. 
+ RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` + // raftAppliedIndex is the current raft applied index of the responding member. + RaftAppliedIndex uint64 `protobuf:"varint,7,opt,name=raftAppliedIndex,proto3" json:"raftAppliedIndex,omitempty"` + // errors contains alarm/health information and status. + Errors []string `protobuf:"bytes,8,rep,name=errors,proto3" json:"errors,omitempty"` + // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. + DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"` + // isLearner indicates if the member is raft learner. + IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{60} +} +func (m *StatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusResponse.Merge(m, src) +} +func (m *StatusResponse) XXX_Size() int { + return m.Size() +} +func (m *StatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusResponse proto.InternalMessageInfo + +func (m *StatusResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *StatusResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *StatusResponse) GetDbSize() int64 { + if m != nil { + return m.DbSize + } + return 0 +} + +func (m *StatusResponse) GetLeader() uint64 { + if m != nil { + return m.Leader + } + return 0 +} + +func (m *StatusResponse) GetRaftIndex() uint64 { + if m != nil { + return m.RaftIndex + } + return 0 +} + +func (m *StatusResponse) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + +func (m *StatusResponse) GetRaftAppliedIndex() uint64 { + if m != nil { + return m.RaftAppliedIndex + } + return 0 +} + +func (m *StatusResponse) GetErrors() []string { + if m != nil { + return m.Errors + } + return nil +} + +func (m *StatusResponse) GetDbSizeInUse() int64 { + if m != nil { + return m.DbSizeInUse + } + return 0 +} + +func (m *StatusResponse) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + +type AuthEnableRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } +func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthEnableRequest) ProtoMessage() {} +func (*AuthEnableRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{61} +} +func (m *AuthEnableRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
+func (m *AuthEnableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthEnableRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthEnableRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthEnableRequest.Merge(m, src) +} +func (m *AuthEnableRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthEnableRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthEnableRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthEnableRequest proto.InternalMessageInfo + +type AuthDisableRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } +func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthDisableRequest) ProtoMessage() {} +func (*AuthDisableRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{62} +} +func (m *AuthDisableRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthDisableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthDisableRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthDisableRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthDisableRequest.Merge(m, src) +} +func (m *AuthDisableRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthDisableRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthDisableRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthDisableRequest proto.InternalMessageInfo + +type AuthStatusRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthStatusRequest) Reset() { *m = AuthStatusRequest{} } +func (m *AuthStatusRequest) String() string { return proto.CompactTextString(m) } +func (*AuthStatusRequest) ProtoMessage() {} +func (*AuthStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{63} +} +func (m *AuthStatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthStatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthStatusRequest.Merge(m, src) +} +func (m *AuthStatusRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthStatusRequest proto.InternalMessageInfo + +type AuthenticateRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } +func (m *AuthenticateRequest) 
String() string { return proto.CompactTextString(m) } +func (*AuthenticateRequest) ProtoMessage() {} +func (*AuthenticateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{64} +} +func (m *AuthenticateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthenticateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthenticateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthenticateRequest.Merge(m, src) +} +func (m *AuthenticateRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthenticateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthenticateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthenticateRequest proto.InternalMessageInfo + +func (m *AuthenticateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthenticateRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type AuthUserAddRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Options *authpb.UserAddOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` + HashedPassword string `protobuf:"bytes,4,opt,name=hashedPassword,proto3" json:"hashedPassword,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } +func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserAddRequest) ProtoMessage() {} +func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{65} +} +func (m *AuthUserAddRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthUserAddRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserAddRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserAddRequest.Merge(m, src) +} +func (m *AuthUserAddRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthUserAddRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserAddRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserAddRequest proto.InternalMessageInfo + +func (m *AuthUserAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserAddRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *AuthUserAddRequest) GetOptions() *authpb.UserAddOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *AuthUserAddRequest) GetHashedPassword() string { + if m != nil { + return m.HashedPassword + } + return "" +} + +type AuthUserGetRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserGetRequest) Reset() { *m 
= AuthUserGetRequest{} } +func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetRequest) ProtoMessage() {} +func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{66} +} +func (m *AuthUserGetRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthUserGetRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserGetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserGetRequest.Merge(m, src) +} +func (m *AuthUserGetRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthUserGetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserGetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserGetRequest proto.InternalMessageInfo + +func (m *AuthUserGetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthUserDeleteRequest struct { + // name is the name of the user to delete. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } +func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteRequest) ProtoMessage() {} +func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{67} +} +func (m *AuthUserDeleteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthUserDeleteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserDeleteRequest.Merge(m, src) +} +func (m *AuthUserDeleteRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthUserDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserDeleteRequest proto.InternalMessageInfo + +func (m *AuthUserDeleteRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthUserChangePasswordRequest struct { + // name is the name of the user whose password is being changed. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // password is the new password for the user. Note that this field will be removed in the API layer. + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer. 
+ HashedPassword string `protobuf:"bytes,3,opt,name=hashedPassword,proto3" json:"hashedPassword,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} } +func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserChangePasswordRequest) ProtoMessage() {} +func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{68} +} +func (m *AuthUserChangePasswordRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserChangePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthUserChangePasswordRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserChangePasswordRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserChangePasswordRequest.Merge(m, src) +} +func (m *AuthUserChangePasswordRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthUserChangePasswordRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserChangePasswordRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserChangePasswordRequest proto.InternalMessageInfo + +func (m *AuthUserChangePasswordRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserChangePasswordRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *AuthUserChangePasswordRequest) GetHashedPassword() string { + if m != nil { + return m.HashedPassword + } + return "" +} + +type AuthUserGrantRoleRequest struct { + // user is the name of the user which should be granted a given role. + User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + // role is the name of the role to grant to the user. 
+ Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } +func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleRequest) ProtoMessage() {} +func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{69} +} +func (m *AuthUserGrantRoleRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserGrantRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthUserGrantRoleRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserGrantRoleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserGrantRoleRequest.Merge(m, src) +} +func (m *AuthUserGrantRoleRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthUserGrantRoleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserGrantRoleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserGrantRoleRequest proto.InternalMessageInfo + +func (m *AuthUserGrantRoleRequest) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *AuthUserGrantRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthUserRevokeRoleRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } +func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleRequest) ProtoMessage() {} +func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{70} +} +func (m *AuthUserRevokeRoleRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserRevokeRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthUserRevokeRoleRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserRevokeRoleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserRevokeRoleRequest.Merge(m, src) +} +func (m *AuthUserRevokeRoleRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthUserRevokeRoleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserRevokeRoleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserRevokeRoleRequest proto.InternalMessageInfo + +func (m *AuthUserRevokeRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserRevokeRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthRoleAddRequest struct { + // name is the name of the role to add to the authentication system. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } +func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddRequest) ProtoMessage() {} +func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{71} +} +func (m *AuthRoleAddRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthRoleAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthRoleAddRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthRoleAddRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthRoleAddRequest.Merge(m, src) +} +func (m *AuthRoleAddRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthRoleAddRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthRoleAddRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthRoleAddRequest proto.InternalMessageInfo + +func (m *AuthRoleAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthRoleGetRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } +func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetRequest) ProtoMessage() {} +func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{72} +} +func (m *AuthRoleGetRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthRoleGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthRoleGetRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthRoleGetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthRoleGetRequest.Merge(m, src) +} +func (m *AuthRoleGetRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthRoleGetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthRoleGetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthRoleGetRequest proto.InternalMessageInfo + +func (m *AuthRoleGetRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthUserListRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } +func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserListRequest) ProtoMessage() {} +func (*AuthUserListRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{73} +} +func (m *AuthUserListRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthUserListRequest.Marshal(b, m, deterministic) + } else { + b 
= b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserListRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserListRequest.Merge(m, src) +} +func (m *AuthUserListRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthUserListRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserListRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserListRequest proto.InternalMessageInfo + +type AuthRoleListRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } +func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleListRequest) ProtoMessage() {} +func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{74} +} +func (m *AuthRoleListRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthRoleListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthRoleListRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthRoleListRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthRoleListRequest.Merge(m, src) +} +func (m *AuthRoleListRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthRoleListRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthRoleListRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthRoleListRequest proto.InternalMessageInfo + +type AuthRoleDeleteRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } +func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleDeleteRequest) ProtoMessage() {} +func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{75} +} +func (m *AuthRoleDeleteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthRoleDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthRoleDeleteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthRoleDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthRoleDeleteRequest.Merge(m, src) +} +func (m *AuthRoleDeleteRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthRoleDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthRoleDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthRoleDeleteRequest proto.InternalMessageInfo + +func (m *AuthRoleDeleteRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthRoleGrantPermissionRequest struct { + // name is the name of the role which will be granted the permission. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // perm is the permission to grant to the role. 
+ Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm,proto3" json:"perm,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} } +func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} +func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{76} +} +func (m *AuthRoleGrantPermissionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthRoleGrantPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthRoleGrantPermissionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthRoleGrantPermissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthRoleGrantPermissionRequest.Merge(m, src) +} +func (m *AuthRoleGrantPermissionRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthRoleGrantPermissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthRoleGrantPermissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthRoleGrantPermissionRequest proto.InternalMessageInfo + +func (m *AuthRoleGrantPermissionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { + if m != nil { + return m.Perm + } + return nil +} + +type AuthRoleRevokePermissionRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} } +func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} +func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{77} +} +func (m *AuthRoleRevokePermissionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthRoleRevokePermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthRoleRevokePermissionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthRoleRevokePermissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthRoleRevokePermissionRequest.Merge(m, src) +} +func (m *AuthRoleRevokePermissionRequest) XXX_Size() int { + return m.Size() +} +func (m *AuthRoleRevokePermissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuthRoleRevokePermissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthRoleRevokePermissionRequest proto.InternalMessageInfo + +func (m *AuthRoleRevokePermissionRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetKey() 
[]byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +type AuthEnableResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } +func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthEnableResponse) ProtoMessage() {} +func (*AuthEnableResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{78} +} +func (m *AuthEnableResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthEnableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthEnableResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthEnableResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthEnableResponse.Merge(m, src) +} +func (m *AuthEnableResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthEnableResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthEnableResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthEnableResponse proto.InternalMessageInfo + +func (m *AuthEnableResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthDisableResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } +func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthDisableResponse) ProtoMessage() {} +func (*AuthDisableResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{79} +} +func (m *AuthDisableResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthDisableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthDisableResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthDisableResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthDisableResponse.Merge(m, src) +} +func (m *AuthDisableResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthDisableResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthDisableResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthDisableResponse proto.InternalMessageInfo + +func (m *AuthDisableResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthStatusResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` + // authRevision is the current revision of auth store + AuthRevision uint64 `protobuf:"varint,3,opt,name=authRevision,proto3" json:"authRevision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *AuthStatusResponse) Reset() { *m = AuthStatusResponse{} } +func (m *AuthStatusResponse) String() string { return proto.CompactTextString(m) } +func (*AuthStatusResponse) ProtoMessage() {} +func (*AuthStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{80} +} +func (m *AuthStatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthStatusResponse.Merge(m, src) +} +func (m *AuthStatusResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthStatusResponse proto.InternalMessageInfo + +func (m *AuthStatusResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthStatusResponse) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +func (m *AuthStatusResponse) GetAuthRevision() uint64 { + if m != nil { + return m.AuthRevision + } + return 0 +} + +type AuthenticateResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // token is an authorized token that can be used in succeeding RPCs + Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } +func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } +func (*AuthenticateResponse) ProtoMessage() {} +func (*AuthenticateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{81} +} +func (m *AuthenticateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthenticateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthenticateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthenticateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthenticateResponse.Merge(m, src) +} +func (m *AuthenticateResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthenticateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthenticateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthenticateResponse proto.InternalMessageInfo + +func (m *AuthenticateResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthenticateResponse) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + +type AuthUserAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } +func (m *AuthUserAddResponse) String() string 
{ return proto.CompactTextString(m) } +func (*AuthUserAddResponse) ProtoMessage() {} +func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{82} +} +func (m *AuthUserAddResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthUserAddResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserAddResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserAddResponse.Merge(m, src) +} +func (m *AuthUserAddResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthUserAddResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserAddResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserAddResponse proto.InternalMessageInfo + +func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserGetResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } +func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetResponse) ProtoMessage() {} +func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{83} +} +func (m *AuthUserGetResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthUserGetResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserGetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserGetResponse.Merge(m, src) +} +func (m *AuthUserGetResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthUserGetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserGetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserGetResponse proto.InternalMessageInfo + +func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthUserGetResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + +type AuthUserDeleteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } +func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteResponse) ProtoMessage() {} +func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{84} +} +func (m *AuthUserDeleteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_AuthUserDeleteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserDeleteResponse.Merge(m, src) +} +func (m *AuthUserDeleteResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthUserDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserDeleteResponse proto.InternalMessageInfo + +func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserChangePasswordResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} } +func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserChangePasswordResponse) ProtoMessage() {} +func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{85} +} +func (m *AuthUserChangePasswordResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserChangePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthUserChangePasswordResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserChangePasswordResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserChangePasswordResponse.Merge(m, src) +} +func (m *AuthUserChangePasswordResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthUserChangePasswordResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserChangePasswordResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserChangePasswordResponse proto.InternalMessageInfo + +func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserGrantRoleResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } +func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleResponse) ProtoMessage() {} +func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{86} +} +func (m *AuthUserGrantRoleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserGrantRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthUserGrantRoleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserGrantRoleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserGrantRoleResponse.Merge(m, src) +} +func (m *AuthUserGrantRoleResponse) XXX_Size() int { + 
return m.Size() +} +func (m *AuthUserGrantRoleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserGrantRoleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserGrantRoleResponse proto.InternalMessageInfo + +func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserRevokeRoleResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } +func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleResponse) ProtoMessage() {} +func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{87} +} +func (m *AuthUserRevokeRoleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserRevokeRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthUserRevokeRoleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserRevokeRoleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserRevokeRoleResponse.Merge(m, src) +} +func (m *AuthUserRevokeRoleResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthUserRevokeRoleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserRevokeRoleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserRevokeRoleResponse proto.InternalMessageInfo + +func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } +func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddResponse) ProtoMessage() {} +func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{88} +} +func (m *AuthRoleAddResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthRoleAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthRoleAddResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthRoleAddResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthRoleAddResponse.Merge(m, src) +} +func (m *AuthRoleAddResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthRoleAddResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthRoleAddResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthRoleAddResponse proto.InternalMessageInfo + +func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleGetResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Perm []*authpb.Permission 
`protobuf:"bytes,2,rep,name=perm,proto3" json:"perm,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } +func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetResponse) ProtoMessage() {} +func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{89} +} +func (m *AuthRoleGetResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthRoleGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthRoleGetResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthRoleGetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthRoleGetResponse.Merge(m, src) +} +func (m *AuthRoleGetResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthRoleGetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthRoleGetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthRoleGetResponse proto.InternalMessageInfo + +func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission { + if m != nil { + return m.Perm + } + return nil +} + +type AuthRoleListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } +func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleListResponse) ProtoMessage() {} +func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{90} +} +func (m *AuthRoleListResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthRoleListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthRoleListResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthRoleListResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthRoleListResponse.Merge(m, src) +} +func (m *AuthRoleListResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthRoleListResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthRoleListResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthRoleListResponse proto.InternalMessageInfo + +func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthRoleListResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + +type AuthUserListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Users []string `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserListResponse) Reset() { *m 
= AuthUserListResponse{} } +func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserListResponse) ProtoMessage() {} +func (*AuthUserListResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{91} +} +func (m *AuthUserListResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthUserListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthUserListResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthUserListResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthUserListResponse.Merge(m, src) +} +func (m *AuthUserListResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthUserListResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthUserListResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthUserListResponse proto.InternalMessageInfo + +func (m *AuthUserListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthUserListResponse) GetUsers() []string { + if m != nil { + return m.Users + } + return nil +} + +type AuthRoleDeleteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } +func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleDeleteResponse) ProtoMessage() {} +func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{92} +} +func (m *AuthRoleDeleteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthRoleDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthRoleDeleteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthRoleDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthRoleDeleteResponse.Merge(m, src) +} +func (m *AuthRoleDeleteResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthRoleDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthRoleDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthRoleDeleteResponse proto.InternalMessageInfo + +func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleGrantPermissionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} } +func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} +func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{93} +} +func (m *AuthRoleGrantPermissionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*AuthRoleGrantPermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthRoleGrantPermissionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthRoleGrantPermissionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthRoleGrantPermissionResponse.Merge(m, src) +} +func (m *AuthRoleGrantPermissionResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthRoleGrantPermissionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthRoleGrantPermissionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthRoleGrantPermissionResponse proto.InternalMessageInfo + +func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleRevokePermissionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} } +func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} +func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{94} +} +func (m *AuthRoleRevokePermissionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthRoleRevokePermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthRoleRevokePermissionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthRoleRevokePermissionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthRoleRevokePermissionResponse.Merge(m, src) +} +func (m *AuthRoleRevokePermissionResponse) XXX_Size() int { + return m.Size() +} +func (m *AuthRoleRevokePermissionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AuthRoleRevokePermissionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthRoleRevokePermissionResponse proto.InternalMessageInfo + +func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { + proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) + proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) + proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) + proto.RegisterEnum("etcdserverpb.DowngradeRequest_DowngradeAction", DowngradeRequest_DowngradeAction_name, DowngradeRequest_DowngradeAction_value) 
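+	// Note: the RegisterEnum calls above and the RegisterType calls below
+	// publish every etcdserverpb name to the global proto registry, which is
+	// what lets the runtime resolve types by their fully qualified proto
+	// names (for example via proto.MessageType("etcdserverpb.RangeRequest")).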
+ proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader") + proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest") + proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse") + proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest") + proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse") + proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest") + proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse") + proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp") + proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp") + proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare") + proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest") + proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse") + proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest") + proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse") + proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest") + proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest") + proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse") + proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse") + proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest") + proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse") + proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest") + proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest") + proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest") + proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest") + proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse") + proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest") + proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse") + proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest") + proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse") + proto.RegisterType((*LeaseCheckpoint)(nil), "etcdserverpb.LeaseCheckpoint") + proto.RegisterType((*LeaseCheckpointRequest)(nil), "etcdserverpb.LeaseCheckpointRequest") + proto.RegisterType((*LeaseCheckpointResponse)(nil), "etcdserverpb.LeaseCheckpointResponse") + proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest") + proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse") + proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest") + proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse") + proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest") + proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus") + proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse") + proto.RegisterType((*Member)(nil), "etcdserverpb.Member") + proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest") + proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse") + proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest") + proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse") + proto.RegisterType((*MemberUpdateRequest)(nil), 
"etcdserverpb.MemberUpdateRequest") + proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse") + proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest") + proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse") + proto.RegisterType((*MemberPromoteRequest)(nil), "etcdserverpb.MemberPromoteRequest") + proto.RegisterType((*MemberPromoteResponse)(nil), "etcdserverpb.MemberPromoteResponse") + proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest") + proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse") + proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest") + proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse") + proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest") + proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember") + proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse") + proto.RegisterType((*DowngradeRequest)(nil), "etcdserverpb.DowngradeRequest") + proto.RegisterType((*DowngradeResponse)(nil), "etcdserverpb.DowngradeResponse") + proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse") + proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest") + proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest") + proto.RegisterType((*AuthStatusRequest)(nil), "etcdserverpb.AuthStatusRequest") + proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest") + proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest") + proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest") + proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest") + proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest") + proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest") + proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest") + proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest") + proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest") + proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest") + proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest") + proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest") + proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest") + proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest") + proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse") + proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse") + proto.RegisterType((*AuthStatusResponse)(nil), "etcdserverpb.AuthStatusResponse") + proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse") + proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse") + proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse") + proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse") + proto.RegisterType((*AuthUserChangePasswordResponse)(nil), 
"etcdserverpb.AuthUserChangePasswordResponse") + proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse") + proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse") + proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse") + proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse") + proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse") + proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse") + proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse") + proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse") + proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse") +} + +func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } + +var fileDescriptor_77a6da22d6a3feb1 = []byte{ + // 4110 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0xdd, 0x73, 0x1b, 0xc9, + 0x71, 0xe7, 0x02, 0x24, 0x01, 0x34, 0x3e, 0x08, 0x0e, 0x29, 0x0a, 0xc2, 0x49, 0x14, 0x6f, 0x74, + 0xd2, 0xf1, 0xa4, 0x3b, 0xe2, 0x4c, 0xdb, 0xb9, 0x2a, 0x25, 0x71, 0x0c, 0x91, 0x38, 0x89, 0x47, + 0x8a, 0xe4, 0x2d, 0x21, 0xdd, 0x47, 0xb9, 0xc2, 0x5a, 0x02, 0x23, 0x72, 0x43, 0x60, 0x17, 0xde, + 0x5d, 0x40, 0xe4, 0xe5, 0xc3, 0x2e, 0x97, 0xe3, 0x4a, 0x5e, 0xed, 0xaa, 0x54, 0xf2, 0x90, 0xbc, + 0xa4, 0x52, 0x2e, 0x3f, 0xf8, 0x39, 0xff, 0x42, 0x9e, 0xf2, 0x51, 0xf9, 0x07, 0x52, 0x17, 0xbf, + 0x24, 0x7f, 0x85, 0x6b, 0xbe, 0x76, 0x67, 0xf6, 0x83, 0x92, 0x8d, 0xbb, 0x7b, 0x11, 0x31, 0x3d, + 0x3d, 0xfd, 0xeb, 0xe9, 0x99, 0xe9, 0xee, 0xe9, 0x59, 0x41, 0xc9, 0x1b, 0xf5, 0x36, 0x46, 0x9e, + 0x1b, 0xb8, 0xa8, 0x42, 0x82, 0x5e, 0xdf, 0x27, 0xde, 0x84, 0x78, 0xa3, 0x93, 0xe6, 0xf2, 0xa9, + 0x7b, 0xea, 0xb2, 0x8e, 0x16, 0xfd, 0xc5, 0x79, 0x9a, 0x0d, 0xca, 0xd3, 0xb2, 0x46, 0x76, 0x6b, + 0x38, 0xe9, 0xf5, 0x46, 0x27, 0xad, 0xf3, 0x89, 0xe8, 0x69, 0x86, 0x3d, 0xd6, 0x38, 0x38, 0x1b, + 0x9d, 0xb0, 0x3f, 0xa2, 0xef, 0xe6, 0xa9, 0xeb, 0x9e, 0x0e, 0x08, 0xef, 0x75, 0x1c, 0x37, 0xb0, + 0x02, 0xdb, 0x75, 0x7c, 0xde, 0x8b, 0xff, 0xda, 0x80, 0x9a, 0x49, 0xfc, 0x91, 0xeb, 0xf8, 0xe4, + 0x09, 0xb1, 0xfa, 0xc4, 0x43, 0xb7, 0x00, 0x7a, 0x83, 0xb1, 0x1f, 0x10, 0xef, 0xd8, 0xee, 0x37, + 0x8c, 0x35, 0x63, 0x7d, 0xd6, 0x2c, 0x09, 0xca, 0x4e, 0x1f, 0xbd, 0x01, 0xa5, 0x21, 0x19, 0x9e, + 0xf0, 0xde, 0x1c, 0xeb, 0x2d, 0x72, 0xc2, 0x4e, 0x1f, 0x35, 0xa1, 0xe8, 0x91, 0x89, 0xed, 0xdb, + 0xae, 0xd3, 0xc8, 0xaf, 0x19, 0xeb, 0x79, 0x33, 0x6c, 0xd3, 0x81, 0x9e, 0xf5, 0x22, 0x38, 0x0e, + 0x88, 0x37, 0x6c, 0xcc, 0xf2, 0x81, 0x94, 0xd0, 0x25, 0xde, 0x10, 0xff, 0x74, 0x0e, 0x2a, 0xa6, + 0xe5, 0x9c, 0x12, 0x93, 0xfc, 0x70, 0x4c, 0xfc, 0x00, 0xd5, 0x21, 0x7f, 0x4e, 0x2e, 0x19, 0x7c, + 0xc5, 0xa4, 0x3f, 0xf9, 0x78, 0xe7, 0x94, 0x1c, 0x13, 0x87, 0x03, 0x57, 0xe8, 0x78, 0xe7, 0x94, + 0x74, 0x9c, 0x3e, 0x5a, 0x86, 0xb9, 0x81, 0x3d, 0xb4, 0x03, 0x81, 0xca, 0x1b, 0x9a, 0x3a, 0xb3, + 0x31, 0x75, 0xb6, 0x00, 0x7c, 0xd7, 0x0b, 0x8e, 0x5d, 0xaf, 0x4f, 0xbc, 0xc6, 0xdc, 0x9a, 0xb1, + 0x5e, 0xdb, 0x7c, 0x6b, 0x43, 0x5d, 0x86, 0x0d, 0x55, 0xa1, 0x8d, 0x23, 0xd7, 0x0b, 0x0e, 0x28, + 0xaf, 0x59, 0xf2, 0xe5, 0x4f, 0xf4, 0x21, 0x94, 0x99, 0x90, 0xc0, 0xf2, 0x4e, 0x49, 0xd0, 0x98, + 0x67, 0x52, 0xee, 0xbe, 0x42, 0x4a, 0x97, 0x31, 0x9b, 0x0c, 0x9e, 0xff, 0x46, 0x18, 0x2a, 0x3e, + 0xf1, 
0x6c, 0x6b, 0x60, 0x7f, 0x61, 0x9d, 0x0c, 0x48, 0xa3, 0xb0, 0x66, 0xac, 0x17, 0x4d, 0x8d, + 0x46, 0xe7, 0x7f, 0x4e, 0x2e, 0xfd, 0x63, 0xd7, 0x19, 0x5c, 0x36, 0x8a, 0x8c, 0xa1, 0x48, 0x09, + 0x07, 0xce, 0xe0, 0x92, 0x2d, 0x9a, 0x3b, 0x76, 0x02, 0xde, 0x5b, 0x62, 0xbd, 0x25, 0x46, 0x61, + 0xdd, 0xeb, 0x50, 0x1f, 0xda, 0xce, 0xf1, 0xd0, 0xed, 0x1f, 0x87, 0x06, 0x01, 0x66, 0x90, 0xda, + 0xd0, 0x76, 0x9e, 0xba, 0x7d, 0x53, 0x9a, 0x85, 0x72, 0x5a, 0x17, 0x3a, 0x67, 0x59, 0x70, 0x5a, + 0x17, 0x2a, 0xe7, 0x06, 0x2c, 0x51, 0x99, 0x3d, 0x8f, 0x58, 0x01, 0x89, 0x98, 0x2b, 0x8c, 0x79, + 0x71, 0x68, 0x3b, 0x5b, 0xac, 0x47, 0xe3, 0xb7, 0x2e, 0x12, 0xfc, 0x55, 0xc1, 0x6f, 0x5d, 0xe8, + 0xfc, 0x78, 0x03, 0x4a, 0xa1, 0xcd, 0x51, 0x11, 0x66, 0xf7, 0x0f, 0xf6, 0x3b, 0xf5, 0x19, 0x04, + 0x30, 0xdf, 0x3e, 0xda, 0xea, 0xec, 0x6f, 0xd7, 0x0d, 0x54, 0x86, 0xc2, 0x76, 0x87, 0x37, 0x72, + 0xf8, 0x11, 0x40, 0x64, 0x5d, 0x54, 0x80, 0xfc, 0x6e, 0xe7, 0xb3, 0xfa, 0x0c, 0xe5, 0x79, 0xde, + 0x31, 0x8f, 0x76, 0x0e, 0xf6, 0xeb, 0x06, 0x1d, 0xbc, 0x65, 0x76, 0xda, 0xdd, 0x4e, 0x3d, 0x47, + 0x39, 0x9e, 0x1e, 0x6c, 0xd7, 0xf3, 0xa8, 0x04, 0x73, 0xcf, 0xdb, 0x7b, 0xcf, 0x3a, 0xf5, 0x59, + 0xfc, 0x0b, 0x03, 0xaa, 0x62, 0xbd, 0xf8, 0x99, 0x40, 0xdf, 0x81, 0xf9, 0x33, 0x76, 0x2e, 0xd8, + 0x56, 0x2c, 0x6f, 0xde, 0x8c, 0x2d, 0xae, 0x76, 0x76, 0x4c, 0xc1, 0x8b, 0x30, 0xe4, 0xcf, 0x27, + 0x7e, 0x23, 0xb7, 0x96, 0x5f, 0x2f, 0x6f, 0xd6, 0x37, 0xf8, 0x79, 0xdd, 0xd8, 0x25, 0x97, 0xcf, + 0xad, 0xc1, 0x98, 0x98, 0xb4, 0x13, 0x21, 0x98, 0x1d, 0xba, 0x1e, 0x61, 0x3b, 0xb6, 0x68, 0xb2, + 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0xf0, 0xaf, 0x0c, 0x80, 0xc3, 0x71, 0x90, + 0x7d, 0x34, 0x96, 0x61, 0x6e, 0x42, 0x05, 0x8b, 0x63, 0xc1, 0x1b, 0xec, 0x4c, 0x10, 0xcb, 0x27, + 0xe1, 0x99, 0xa0, 0x0d, 0x74, 0x1d, 0x0a, 0x23, 0x8f, 0x4c, 0x8e, 0xcf, 0x27, 0x0c, 0xa4, 0x68, + 0xce, 0xd3, 0xe6, 0xee, 0x04, 0xbd, 0x09, 0x15, 0xfb, 0xd4, 0x71, 0x3d, 0x72, 0xcc, 0x65, 0xcd, + 0xb1, 0xde, 0x32, 0xa7, 0x31, 0xbd, 0x15, 0x16, 0x2e, 0x78, 0x5e, 0x65, 0xd9, 0xa3, 0x24, 0xec, + 0x40, 0x99, 0xa9, 0x3a, 0x95, 0xf9, 0xde, 0x89, 0x74, 0xcc, 0xb1, 0x61, 0x49, 0x13, 0x0a, 0xad, + 0xf1, 0x0f, 0x00, 0x6d, 0x93, 0x01, 0x09, 0xc8, 0x34, 0xde, 0x43, 0xb1, 0x49, 0x5e, 0xb5, 0x09, + 0xfe, 0xb9, 0x01, 0x4b, 0x9a, 0xf8, 0xa9, 0xa6, 0xd5, 0x80, 0x42, 0x9f, 0x09, 0xe3, 0x1a, 0xe4, + 0x4d, 0xd9, 0x44, 0x0f, 0xa0, 0x28, 0x14, 0xf0, 0x1b, 0xf9, 0x8c, 0x4d, 0x53, 0xe0, 0x3a, 0xf9, + 0xf8, 0x57, 0x39, 0x28, 0x89, 0x89, 0x1e, 0x8c, 0x50, 0x1b, 0xaa, 0x1e, 0x6f, 0x1c, 0xb3, 0xf9, + 0x08, 0x8d, 0x9a, 0xd9, 0x4e, 0xe8, 0xc9, 0x8c, 0x59, 0x11, 0x43, 0x18, 0x19, 0xfd, 0x21, 0x94, + 0xa5, 0x88, 0xd1, 0x38, 0x10, 0x26, 0x6f, 0xe8, 0x02, 0xa2, 0xfd, 0xf7, 0x64, 0xc6, 0x04, 0xc1, + 0x7e, 0x38, 0x0e, 0x50, 0x17, 0x96, 0xe5, 0x60, 0x3e, 0x1b, 0xa1, 0x46, 0x9e, 0x49, 0x59, 0xd3, + 0xa5, 0x24, 0x97, 0xea, 0xc9, 0x8c, 0x89, 0xc4, 0x78, 0xa5, 0x53, 0x55, 0x29, 0xb8, 0xe0, 0xce, + 0x3b, 0xa1, 0x52, 0xf7, 0xc2, 0x49, 0xaa, 0xd4, 0xbd, 0x70, 0x1e, 0x95, 0xa0, 0x20, 0x5a, 0xf8, + 0x5f, 0x73, 0x00, 0x72, 0x35, 0x0e, 0x46, 0x68, 0x1b, 0x6a, 0x9e, 0x68, 0x69, 0xd6, 0x7a, 0x23, + 0xd5, 0x5a, 0x62, 0x11, 0x67, 0xcc, 0xaa, 0x1c, 0xc4, 0x95, 0xfb, 0x1e, 0x54, 0x42, 0x29, 0x91, + 0xc1, 0x6e, 0xa4, 0x18, 0x2c, 0x94, 0x50, 0x96, 0x03, 0xa8, 0xc9, 0x3e, 0x81, 0x6b, 0xe1, 0xf8, + 0x14, 0x9b, 0xbd, 0x79, 0x85, 0xcd, 0x42, 0x81, 0x4b, 0x52, 0x82, 0x6a, 0x35, 0x55, 0xb1, 0xc8, + 0x6c, 0x37, 0x52, 0xcc, 0x96, 0x54, 0x8c, 0x1a, 0x0e, 0x68, 0xbc, 0xe4, 0x4d, 0xfc, 0x7f, 0x79, + 0x28, 0x6c, 0xb9, 0xc3, 0x91, 
0xe5, 0xd1, 0xd5, 0x98, 0xf7, 0x88, 0x3f, 0x1e, 0x04, 0xcc, 0x5c, + 0xb5, 0xcd, 0x3b, 0xba, 0x44, 0xc1, 0x26, 0xff, 0x9a, 0x8c, 0xd5, 0x14, 0x43, 0xe8, 0x60, 0x11, + 0x1e, 0x73, 0xaf, 0x31, 0x58, 0x04, 0x47, 0x31, 0x44, 0x1e, 0xe4, 0x7c, 0x74, 0x90, 0x9b, 0x50, + 0x98, 0x10, 0x2f, 0x0a, 0xe9, 0x4f, 0x66, 0x4c, 0x49, 0x40, 0xef, 0xc0, 0x42, 0x3c, 0xbc, 0xcc, + 0x09, 0x9e, 0x5a, 0x4f, 0x8f, 0x46, 0x77, 0xa0, 0xa2, 0xc5, 0xb8, 0x79, 0xc1, 0x57, 0x1e, 0x2a, + 0x21, 0x6e, 0x45, 0xfa, 0x55, 0x1a, 0x8f, 0x2b, 0x4f, 0x66, 0xa4, 0x67, 0x5d, 0x91, 0x9e, 0xb5, + 0x28, 0x46, 0x09, 0xdf, 0xaa, 0x39, 0x99, 0xef, 0xeb, 0x4e, 0x06, 0x7f, 0x1f, 0xaa, 0x9a, 0x81, + 0x68, 0xdc, 0xe9, 0x7c, 0xfc, 0xac, 0xbd, 0xc7, 0x83, 0xd4, 0x63, 0x16, 0x97, 0xcc, 0xba, 0x41, + 0x63, 0xdd, 0x5e, 0xe7, 0xe8, 0xa8, 0x9e, 0x43, 0x55, 0x28, 0xed, 0x1f, 0x74, 0x8f, 0x39, 0x57, + 0x1e, 0x3f, 0x0e, 0x25, 0x88, 0x20, 0xa7, 0xc4, 0xb6, 0x19, 0x25, 0xb6, 0x19, 0x32, 0xb6, 0xe5, + 0xa2, 0xd8, 0xc6, 0xc2, 0xdc, 0x5e, 0xa7, 0x7d, 0xd4, 0xa9, 0xcf, 0x3e, 0xaa, 0x41, 0x85, 0xdb, + 0xf7, 0x78, 0xec, 0xd0, 0x50, 0xfb, 0xcf, 0x06, 0x40, 0x74, 0x9a, 0x50, 0x0b, 0x0a, 0x3d, 0x8e, + 0xd3, 0x30, 0x98, 0x33, 0xba, 0x96, 0xba, 0x64, 0xa6, 0xe4, 0x42, 0xdf, 0x82, 0x82, 0x3f, 0xee, + 0xf5, 0x88, 0x2f, 0x43, 0xde, 0xf5, 0xb8, 0x3f, 0x14, 0xde, 0xca, 0x94, 0x7c, 0x74, 0xc8, 0x0b, + 0xcb, 0x1e, 0x8c, 0x59, 0x00, 0xbc, 0x7a, 0x88, 0xe0, 0xc3, 0xff, 0x60, 0x40, 0x59, 0xd9, 0xbc, + 0xbf, 0xa7, 0x13, 0xbe, 0x09, 0x25, 0xa6, 0x03, 0xe9, 0x0b, 0x37, 0x5c, 0x34, 0x23, 0x02, 0xfa, + 0x03, 0x28, 0xc9, 0x13, 0x20, 0x3d, 0x71, 0x23, 0x5d, 0xec, 0xc1, 0xc8, 0x8c, 0x58, 0xf1, 0x2e, + 0x2c, 0x32, 0xab, 0xf4, 0x68, 0x72, 0x2d, 0xed, 0xa8, 0xa6, 0x9f, 0x46, 0x2c, 0xfd, 0x6c, 0x42, + 0x71, 0x74, 0x76, 0xe9, 0xdb, 0x3d, 0x6b, 0x20, 0xb4, 0x08, 0xdb, 0xf8, 0x23, 0x40, 0xaa, 0xb0, + 0x69, 0xa6, 0x8b, 0xab, 0x50, 0x7e, 0x62, 0xf9, 0x67, 0x42, 0x25, 0xfc, 0x00, 0xaa, 0xb4, 0xb9, + 0xfb, 0xfc, 0x35, 0x74, 0x64, 0x97, 0x03, 0xc9, 0x3d, 0x95, 0xcd, 0x11, 0xcc, 0x9e, 0x59, 0xfe, + 0x19, 0x9b, 0x68, 0xd5, 0x64, 0xbf, 0xd1, 0x3b, 0x50, 0xef, 0xf1, 0x49, 0x1e, 0xc7, 0xae, 0x0c, + 0x0b, 0x82, 0x1e, 0x66, 0x82, 0x9f, 0x42, 0x85, 0xcf, 0xe1, 0xab, 0x56, 0x02, 0x2f, 0xc2, 0xc2, + 0x91, 0x63, 0x8d, 0xfc, 0x33, 0x57, 0x46, 0x37, 0x3a, 0xe9, 0x7a, 0x44, 0x9b, 0x0a, 0xf1, 0x6d, + 0x58, 0xf0, 0xc8, 0xd0, 0xb2, 0x1d, 0xdb, 0x39, 0x3d, 0x3e, 0xb9, 0x0c, 0x88, 0x2f, 0x2e, 0x4c, + 0xb5, 0x90, 0xfc, 0x88, 0x52, 0xa9, 0x6a, 0x27, 0x03, 0xf7, 0x44, 0xb8, 0x39, 0xf6, 0x1b, 0xff, + 0x2c, 0x07, 0x95, 0x4f, 0xac, 0xa0, 0x27, 0x97, 0x0e, 0xed, 0x40, 0x2d, 0x74, 0x6e, 0x8c, 0x22, + 0x74, 0x89, 0x85, 0x58, 0x36, 0x46, 0xa6, 0xd2, 0x32, 0x3a, 0x56, 0x7b, 0x2a, 0x81, 0x89, 0xb2, + 0x9c, 0x1e, 0x19, 0x84, 0xa2, 0x72, 0xd9, 0xa2, 0x18, 0xa3, 0x2a, 0x4a, 0x25, 0xa0, 0x03, 0xa8, + 0x8f, 0x3c, 0xf7, 0xd4, 0x23, 0xbe, 0x1f, 0x0a, 0xe3, 0x61, 0x0c, 0xa7, 0x08, 0x3b, 0x14, 0xac, + 0x91, 0xb8, 0x85, 0x91, 0x4e, 0x7a, 0xb4, 0x10, 0xe5, 0x33, 0xdc, 0x39, 0xfd, 0x57, 0x0e, 0x50, + 0x72, 0x52, 0xbf, 0x6b, 0x8a, 0x77, 0x17, 0x6a, 0x7e, 0x60, 0x79, 0x89, 0xcd, 0x56, 0x65, 0xd4, + 0xd0, 0xe3, 0xbf, 0x0d, 0xa1, 0x42, 0xc7, 0x8e, 0x1b, 0xd8, 0x2f, 0x2e, 0x45, 0x96, 0x5c, 0x93, + 0xe4, 0x7d, 0x46, 0x45, 0x1d, 0x28, 0xbc, 0xb0, 0x07, 0x01, 0xf1, 0xfc, 0xc6, 0xdc, 0x5a, 0x7e, + 0xbd, 0xb6, 0xf9, 0xe0, 0x55, 0xcb, 0xb0, 0xf1, 0x21, 0xe3, 0xef, 0x5e, 0x8e, 0x88, 0x29, 0xc7, + 0xaa, 0x99, 0xe7, 0xbc, 0x96, 0x8d, 0xdf, 0x80, 0xe2, 0x4b, 0x2a, 0x82, 0xde, 0xb2, 0x0b, 0x3c, + 0x59, 0x64, 0x6d, 0x7e, 0xc9, 0x7e, 0xe1, 0x59, 0xa7, 
0x43, 0xe2, 0x04, 0xf2, 0x1e, 0x28, 0xdb, + 0xf8, 0x2e, 0x40, 0x04, 0x43, 0x5d, 0xfe, 0xfe, 0xc1, 0xe1, 0xb3, 0x6e, 0x7d, 0x06, 0x55, 0xa0, + 0xb8, 0x7f, 0xb0, 0xdd, 0xd9, 0xeb, 0xd0, 0xf8, 0x80, 0x5b, 0xd2, 0xa4, 0xda, 0x5a, 0xaa, 0x98, + 0x86, 0x86, 0x89, 0x57, 0x60, 0x39, 0x6d, 0x01, 0x69, 0x2e, 0x5a, 0x15, 0xbb, 0x74, 0xaa, 0xa3, + 0xa2, 0x42, 0xe7, 0xf4, 0xe9, 0x36, 0xa0, 0xc0, 0x77, 0x6f, 0x5f, 0x24, 0xe7, 0xb2, 0x49, 0x0d, + 0xc1, 0x37, 0x23, 0xe9, 0x8b, 0x55, 0x0a, 0xdb, 0xa9, 0xee, 0x65, 0x2e, 0xd5, 0xbd, 0xa0, 0x3b, + 0x50, 0x0d, 0x4f, 0x83, 0xe5, 0x8b, 0x5c, 0xa0, 0x64, 0x56, 0xe4, 0x46, 0xa7, 0x34, 0xcd, 0xe8, + 0x05, 0xdd, 0xe8, 0xe8, 0x2e, 0xcc, 0x93, 0x09, 0x71, 0x02, 0xbf, 0x51, 0x66, 0x11, 0xa3, 0x2a, + 0x73, 0xf7, 0x0e, 0xa5, 0x9a, 0xa2, 0x13, 0x7f, 0x17, 0x16, 0xd9, 0x1d, 0xe9, 0xb1, 0x67, 0x39, + 0xea, 0x65, 0xae, 0xdb, 0xdd, 0x13, 0xe6, 0xa6, 0x3f, 0x51, 0x0d, 0x72, 0x3b, 0xdb, 0xc2, 0x08, + 0xb9, 0x9d, 0x6d, 0xfc, 0x13, 0x03, 0x90, 0x3a, 0x6e, 0x2a, 0x3b, 0xc7, 0x84, 0x4b, 0xf8, 0x7c, + 0x04, 0xbf, 0x0c, 0x73, 0xc4, 0xf3, 0x5c, 0x8f, 0x59, 0xb4, 0x64, 0xf2, 0x06, 0x7e, 0x4b, 0xe8, + 0x60, 0x92, 0x89, 0x7b, 0x1e, 0x9e, 0x41, 0x2e, 0xcd, 0x08, 0x55, 0xdd, 0x85, 0x25, 0x8d, 0x6b, + 0xaa, 0xc8, 0xf5, 0x21, 0x2c, 0x30, 0x61, 0x5b, 0x67, 0xa4, 0x77, 0x3e, 0x72, 0x6d, 0x27, 0x81, + 0x47, 0x57, 0x2e, 0x72, 0xb0, 0x74, 0x1e, 0x7c, 0x62, 0x95, 0x90, 0xd8, 0xed, 0xee, 0xe1, 0xcf, + 0x60, 0x25, 0x26, 0x47, 0xaa, 0xff, 0x27, 0x50, 0xee, 0x85, 0x44, 0x5f, 0xe4, 0x3a, 0xb7, 0x74, + 0xe5, 0xe2, 0x43, 0xd5, 0x11, 0xf8, 0x00, 0xae, 0x27, 0x44, 0x4f, 0x35, 0xe7, 0xb7, 0xe1, 0x1a, + 0x13, 0xb8, 0x4b, 0xc8, 0xa8, 0x3d, 0xb0, 0x27, 0x99, 0x96, 0x1e, 0x89, 0x49, 0x29, 0x8c, 0x5f, + 0xef, 0xbe, 0xc0, 0x7f, 0x24, 0x10, 0xbb, 0xf6, 0x90, 0x74, 0xdd, 0xbd, 0x6c, 0xdd, 0x68, 0x34, + 0x3b, 0x27, 0x97, 0xbe, 0x48, 0x6b, 0xd8, 0x6f, 0xfc, 0x2f, 0x86, 0x30, 0x95, 0x3a, 0xfc, 0x6b, + 0xde, 0xc9, 0xab, 0x00, 0xa7, 0xf4, 0xc8, 0x90, 0x3e, 0xed, 0xe0, 0x15, 0x15, 0x85, 0x12, 0xea, + 0x49, 0xfd, 0x77, 0x45, 0xe8, 0xb9, 0x2c, 0xf6, 0x39, 0xfb, 0x27, 0xf4, 0x72, 0xb7, 0xa0, 0xcc, + 0x08, 0x47, 0x81, 0x15, 0x8c, 0xfd, 0xc4, 0x62, 0xfc, 0x95, 0xd8, 0xf6, 0x72, 0xd0, 0x54, 0xf3, + 0xfa, 0x16, 0xcc, 0xb3, 0xcb, 0x84, 0x4c, 0xa5, 0x6f, 0xa4, 0xec, 0x47, 0xae, 0x87, 0x29, 0x18, + 0xf1, 0xcf, 0x0c, 0x98, 0x7f, 0xca, 0x4a, 0xb0, 0x8a, 0x6a, 0xb3, 0x72, 0x2d, 0x1c, 0x6b, 0xc8, + 0x0b, 0x43, 0x25, 0x93, 0xfd, 0x66, 0xa9, 0x27, 0x21, 0xde, 0x33, 0x73, 0x8f, 0xa7, 0xb8, 0x25, + 0x33, 0x6c, 0x53, 0x9b, 0xf5, 0x06, 0x36, 0x71, 0x02, 0xd6, 0x3b, 0xcb, 0x7a, 0x15, 0x0a, 0xcd, + 0x9e, 0x6d, 0x7f, 0x8f, 0x58, 0x9e, 0x23, 0x8a, 0xa6, 0x45, 0x33, 0x22, 0xe0, 0x3d, 0xa8, 0x73, + 0x3d, 0xda, 0xfd, 0xbe, 0x92, 0x60, 0x86, 0x68, 0x46, 0x0c, 0x4d, 0x93, 0x96, 0x8b, 0x4b, 0xfb, + 0xa5, 0x01, 0x8b, 0x8a, 0xb8, 0xa9, 0xac, 0xfa, 0x2e, 0xcc, 0xf3, 0x22, 0xb5, 0xc8, 0x74, 0x96, + 0xf5, 0x51, 0x1c, 0xc6, 0x14, 0x3c, 0x68, 0x03, 0x0a, 0xfc, 0x97, 0xbc, 0x03, 0xa4, 0xb3, 0x4b, + 0x26, 0x7c, 0x17, 0x96, 0x04, 0x89, 0x0c, 0xdd, 0xb4, 0x83, 0xc1, 0x16, 0x03, 0xff, 0x05, 0x2c, + 0xeb, 0x6c, 0x53, 0x4d, 0x49, 0x51, 0x32, 0xf7, 0x3a, 0x4a, 0xb6, 0xa5, 0x92, 0xcf, 0x46, 0x7d, + 0x25, 0x8f, 0x8a, 0xef, 0x18, 0x75, 0xbd, 0x72, 0xfa, 0x7a, 0x45, 0x13, 0x90, 0x22, 0xbe, 0xd1, + 0x09, 0x7c, 0x20, 0xb7, 0xc3, 0x9e, 0xed, 0x87, 0x3e, 0x1c, 0x43, 0x65, 0x60, 0x3b, 0xc4, 0xf2, + 0x44, 0xe5, 0xdc, 0xe0, 0x95, 0x73, 0x95, 0x86, 0xbf, 0x00, 0xa4, 0x0e, 0xfc, 0x46, 0x95, 0xbe, + 0x27, 0x4d, 0x76, 0xe8, 0xb9, 0x43, 0x37, 0xd3, 0xec, 0xf8, 0x2f, 0xe1, 0x5a, 
0x8c, 0xef, 0x1b, + 0x55, 0x73, 0x09, 0x16, 0xb7, 0x89, 0x4c, 0x68, 0xa4, 0xdb, 0xfb, 0x08, 0x90, 0x4a, 0x9c, 0x2a, + 0xb2, 0xb5, 0x60, 0xf1, 0xa9, 0x3b, 0xa1, 0x2e, 0x92, 0x52, 0x23, 0xdf, 0xc0, 0xeb, 0x10, 0xa1, + 0x29, 0xc2, 0x36, 0x05, 0x57, 0x07, 0x4c, 0x05, 0xfe, 0x1f, 0x06, 0x54, 0xda, 0x03, 0xcb, 0x1b, + 0x4a, 0xe0, 0xef, 0xc1, 0x3c, 0xbf, 0x5d, 0x8b, 0x82, 0xd6, 0x3d, 0x5d, 0x8c, 0xca, 0xcb, 0x1b, + 0x6d, 0x7e, 0x17, 0x17, 0xa3, 0xa8, 0xe2, 0xe2, 0xcd, 0x6b, 0x3b, 0xf6, 0x06, 0xb6, 0x8d, 0xde, + 0x83, 0x39, 0x8b, 0x0e, 0x61, 0xa1, 0xa8, 0x16, 0xaf, 0x6b, 0x30, 0x69, 0xec, 0x0e, 0xc0, 0xb9, + 0xf0, 0x77, 0xa0, 0xac, 0x20, 0xa0, 0x02, 0xe4, 0x1f, 0x77, 0x44, 0xc2, 0xde, 0xde, 0xea, 0xee, + 0x3c, 0xe7, 0x05, 0x9d, 0x1a, 0xc0, 0x76, 0x27, 0x6c, 0xe7, 0xf0, 0xa7, 0x62, 0x94, 0x70, 0xfb, + 0xaa, 0x3e, 0x46, 0x96, 0x3e, 0xb9, 0xd7, 0xd2, 0xe7, 0x02, 0xaa, 0x62, 0xfa, 0xd3, 0x86, 0x31, + 0x26, 0x2f, 0x23, 0x8c, 0x29, 0xca, 0x9b, 0x82, 0x11, 0xff, 0xda, 0x80, 0xfa, 0xb6, 0xfb, 0xd2, + 0x39, 0xf5, 0xac, 0x7e, 0x78, 0x4e, 0x3e, 0x8c, 0xad, 0xd4, 0x46, 0xac, 0x38, 0x1a, 0xe3, 0x8f, + 0x08, 0xb1, 0x15, 0x6b, 0x44, 0x65, 0x43, 0x1e, 0x0b, 0x65, 0x13, 0x7f, 0x00, 0x0b, 0xb1, 0x41, + 0xd4, 0xf6, 0xcf, 0xdb, 0x7b, 0x3b, 0xdb, 0xd4, 0xd6, 0xac, 0xb0, 0xd6, 0xd9, 0x6f, 0x3f, 0xda, + 0xeb, 0x88, 0x07, 0xa4, 0xf6, 0xfe, 0x56, 0x67, 0xaf, 0x9e, 0xc3, 0x3d, 0x58, 0x54, 0xe0, 0xa7, + 0x7d, 0x19, 0xc8, 0xd0, 0x6e, 0x01, 0xaa, 0x22, 0xda, 0x8b, 0x43, 0xf9, 0xef, 0x39, 0xa8, 0x49, + 0xca, 0xd7, 0x83, 0x89, 0x56, 0x60, 0xbe, 0x7f, 0x72, 0x64, 0x7f, 0x21, 0x5f, 0x8e, 0x44, 0x8b, + 0xd2, 0x07, 0x1c, 0x87, 0x3f, 0xdf, 0x8a, 0x16, 0x0d, 0xe3, 0x9e, 0xf5, 0x22, 0xd8, 0x71, 0xfa, + 0xe4, 0x82, 0x25, 0x05, 0xb3, 0x66, 0x44, 0x60, 0x15, 0x26, 0xf1, 0xcc, 0xcb, 0x6e, 0x56, 0xca, + 0xb3, 0x2f, 0xba, 0x0f, 0x75, 0xfa, 0xbb, 0x3d, 0x1a, 0x0d, 0x6c, 0xd2, 0xe7, 0x02, 0x0a, 0x8c, + 0x27, 0x41, 0xa7, 0xe8, 0xec, 0x2e, 0xe2, 0x37, 0x8a, 0x2c, 0x2c, 0x89, 0x16, 0x5a, 0x83, 0x32, + 0xd7, 0x6f, 0xc7, 0x79, 0xe6, 0x13, 0xf6, 0xf6, 0x99, 0x37, 0x55, 0x92, 0x9e, 0x66, 0x40, 0x3c, + 0xcd, 0x58, 0x82, 0xc5, 0xf6, 0x38, 0x38, 0xeb, 0x38, 0x34, 0x56, 0x48, 0x2b, 0x2f, 0x03, 0xa2, + 0xc4, 0x6d, 0xdb, 0x57, 0xa9, 0x82, 0x55, 0x5f, 0x90, 0x0e, 0x2c, 0x51, 0x22, 0x71, 0x02, 0xbb, + 0xa7, 0xc4, 0x55, 0x99, 0x79, 0x19, 0xb1, 0xcc, 0xcb, 0xf2, 0xfd, 0x97, 0xae, 0xd7, 0x17, 0x36, + 0x0f, 0xdb, 0xf8, 0x9f, 0x0c, 0x0e, 0xf9, 0xcc, 0xd7, 0xd2, 0xa7, 0xdf, 0x51, 0x0c, 0x7a, 0x1f, + 0x0a, 0xee, 0x88, 0xbd, 0xf0, 0x8b, 0x32, 0xcc, 0xca, 0x06, 0xff, 0x26, 0x60, 0x43, 0x08, 0x3e, + 0xe0, 0xbd, 0xa6, 0x64, 0x43, 0xf7, 0xa0, 0x76, 0x66, 0xf9, 0x67, 0xa4, 0x7f, 0x28, 0x65, 0xf2, + 0x9b, 0x5f, 0x8c, 0x8a, 0xd7, 0x23, 0xfd, 0x1e, 0x93, 0xe0, 0x0a, 0xfd, 0xf0, 0x03, 0xb8, 0x26, + 0x39, 0xc5, 0xeb, 0xc4, 0x15, 0xcc, 0x2f, 0xe1, 0x96, 0x64, 0xde, 0x3a, 0xb3, 0x9c, 0x53, 0x22, + 0x01, 0x7f, 0x5f, 0x0b, 0x24, 0xe7, 0x93, 0x4f, 0x9d, 0xcf, 0x23, 0x68, 0x84, 0xf3, 0x61, 0x37, + 0x6b, 0x77, 0xa0, 0x2a, 0x3a, 0xf6, 0xc5, 0x79, 0x2a, 0x99, 0xec, 0x37, 0xa5, 0x79, 0xee, 0x20, + 0x4c, 0xa5, 0xe9, 0x6f, 0xbc, 0x05, 0x37, 0xa4, 0x0c, 0x71, 0xe7, 0xd5, 0x85, 0x24, 0x14, 0x4f, + 0x13, 0x22, 0x0c, 0x4b, 0x87, 0x5e, 0xbd, 0xf0, 0x2a, 0xa7, 0xbe, 0x04, 0x4c, 0xa6, 0xa1, 0xc8, + 0xbc, 0xc6, 0x37, 0x25, 0x55, 0x4c, 0xc9, 0x96, 0x24, 0x99, 0x0a, 0x50, 0xc9, 0x62, 0xc1, 0x28, + 0x39, 0xb1, 0x60, 0x09, 0xd1, 0x3f, 0x80, 0xd5, 0x50, 0x09, 0x6a, 0xb7, 0x43, 0xe2, 0x0d, 0x6d, + 0xdf, 0x57, 0xea, 0xde, 0x69, 0x13, 0xbf, 0x07, 0xb3, 0x23, 0x22, 0x82, 0x50, 0x79, 0x13, 0xc9, + 0x4d, 
0xa9, 0x0c, 0x66, 0xfd, 0xb8, 0x0f, 0xb7, 0xa5, 0x74, 0x6e, 0xd1, 0x54, 0xf1, 0x71, 0xa5, + 0x64, 0x35, 0x30, 0x97, 0x51, 0x0d, 0xcc, 0xc7, 0xde, 0x62, 0x3e, 0xe2, 0x86, 0x94, 0x67, 0x7e, + 0xaa, 0xe4, 0x62, 0x97, 0xdb, 0x34, 0x74, 0x15, 0x53, 0x09, 0xfb, 0x1b, 0xe1, 0x05, 0xbe, 0x2a, + 0x0f, 0x4f, 0xd8, 0x0c, 0xe5, 0x43, 0x87, 0x6c, 0xd2, 0xac, 0x99, 0x2e, 0x80, 0xa9, 0xd6, 0x42, + 0x67, 0x4d, 0x8d, 0x86, 0x4f, 0x60, 0x59, 0xf7, 0x6b, 0x53, 0xe9, 0xb2, 0x0c, 0x73, 0x81, 0x7b, + 0x4e, 0x64, 0xac, 0xe1, 0x0d, 0x69, 0xbb, 0xd0, 0xe7, 0x4d, 0x65, 0x3b, 0x2b, 0x12, 0xc6, 0x4e, + 0xc7, 0xb4, 0xfa, 0xd2, 0x8d, 0x25, 0xef, 0x40, 0xbc, 0x81, 0xf7, 0x61, 0x25, 0xee, 0xd9, 0xa6, + 0x52, 0xf9, 0x39, 0x3f, 0x4b, 0x69, 0xce, 0x6f, 0x2a, 0xb9, 0x1f, 0x47, 0x7e, 0x49, 0xf1, 0x6d, + 0x53, 0x89, 0x34, 0xa1, 0x99, 0xe6, 0xea, 0xbe, 0x8a, 0xa3, 0x13, 0x7a, 0xbe, 0xa9, 0x84, 0xf9, + 0x91, 0xb0, 0xe9, 0x97, 0x3f, 0x72, 0x57, 0xf9, 0x2b, 0xdd, 0x95, 0x38, 0x24, 0x91, 0x43, 0xfd, + 0x1a, 0x36, 0x9d, 0xc0, 0x88, 0x7c, 0xf9, 0xb4, 0x18, 0x34, 0x9c, 0x85, 0x18, 0xac, 0x21, 0x37, + 0xb6, 0x1a, 0x01, 0xa6, 0x5a, 0x8c, 0x4f, 0x22, 0x37, 0x9e, 0x08, 0x12, 0x53, 0x09, 0xfe, 0x14, + 0xd6, 0xb2, 0xe3, 0xc3, 0x34, 0x92, 0xef, 0xb7, 0xa0, 0x14, 0x5e, 0x86, 0x94, 0xef, 0xcd, 0xca, + 0x50, 0xd8, 0x3f, 0x38, 0x3a, 0x6c, 0x6f, 0x75, 0xf8, 0x07, 0x67, 0x5b, 0x07, 0xa6, 0xf9, 0xec, + 0xb0, 0x5b, 0xcf, 0x6d, 0xfe, 0x26, 0x0f, 0xb9, 0xdd, 0xe7, 0xe8, 0x33, 0x98, 0xe3, 0x5f, 0x5f, + 0x5c, 0xf1, 0xc9, 0x4d, 0xf3, 0xaa, 0x0f, 0x4c, 0xf0, 0xf5, 0x9f, 0xfc, 0xf7, 0x6f, 0x7e, 0x91, + 0x5b, 0xc4, 0x95, 0xd6, 0xe4, 0xdb, 0xad, 0xf3, 0x49, 0x8b, 0x85, 0xa9, 0x87, 0xc6, 0x7d, 0xf4, + 0x31, 0xe4, 0x0f, 0xc7, 0x01, 0xca, 0xfc, 0x14, 0xa7, 0x99, 0xfd, 0xcd, 0x09, 0xbe, 0xc6, 0x84, + 0x2e, 0x60, 0x10, 0x42, 0x47, 0xe3, 0x80, 0x8a, 0xfc, 0x21, 0x94, 0xd5, 0x2f, 0x46, 0x5e, 0xf9, + 0x7d, 0x4e, 0xf3, 0xd5, 0x5f, 0xa3, 0xe0, 0x5b, 0x0c, 0xea, 0x3a, 0x46, 0x02, 0x8a, 0x7f, 0xd3, + 0xa2, 0xce, 0xa2, 0x7b, 0xe1, 0xa0, 0xcc, 0xaf, 0x77, 0x9a, 0xd9, 0x1f, 0xa8, 0x24, 0x66, 0x11, + 0x5c, 0x38, 0x54, 0xe4, 0x9f, 0x89, 0x6f, 0x53, 0x7a, 0x01, 0xba, 0x9d, 0xf2, 0x6d, 0x82, 0xfa, + 0x0a, 0xdf, 0x5c, 0xcb, 0x66, 0x10, 0x20, 0x37, 0x19, 0xc8, 0x0a, 0x5e, 0x14, 0x20, 0xbd, 0x90, + 0xe5, 0xa1, 0x71, 0x7f, 0xb3, 0x07, 0x73, 0xec, 0x85, 0x0b, 0x7d, 0x2e, 0x7f, 0x34, 0x53, 0x9e, + 0xfa, 0x32, 0x16, 0x5a, 0x7b, 0x1b, 0xc3, 0xcb, 0x0c, 0xa8, 0x86, 0x4b, 0x14, 0x88, 0xbd, 0x6f, + 0x3d, 0x34, 0xee, 0xaf, 0x1b, 0xef, 0x1b, 0x9b, 0xbf, 0x9e, 0x83, 0x39, 0x56, 0xda, 0x45, 0xe7, + 0x00, 0xd1, 0x6b, 0x4f, 0x7c, 0x76, 0x89, 0xf7, 0xa3, 0xf8, 0xec, 0x92, 0x0f, 0x45, 0xb8, 0xc9, + 0x40, 0x97, 0xf1, 0x02, 0x05, 0x65, 0x15, 0xe3, 0x16, 0x2b, 0x82, 0x53, 0x3b, 0xfe, 0xad, 0x21, + 0x2a, 0xdb, 0xfc, 0x2c, 0xa1, 0x34, 0x69, 0xda, 0x93, 0x4f, 0x7c, 0x3b, 0xa4, 0x3c, 0xf7, 0xe0, + 0xef, 0x32, 0xc0, 0x16, 0xae, 0x47, 0x80, 0x1e, 0xe3, 0x78, 0x68, 0xdc, 0xff, 0xbc, 0x81, 0x97, + 0x84, 0x95, 0x63, 0x3d, 0xe8, 0x47, 0x50, 0xd3, 0x9f, 0x34, 0xd0, 0x9d, 0x14, 0xac, 0xf8, 0xcb, + 0x48, 0xf3, 0xad, 0xab, 0x99, 0x84, 0x4e, 0xab, 0x4c, 0x27, 0x01, 0xce, 0x91, 0xcf, 0x09, 0x19, + 0x59, 0x94, 0x49, 0xac, 0x01, 0xfa, 0x47, 0x43, 0xbc, 0x38, 0x45, 0x6f, 0x14, 0x28, 0x4d, 0x7a, + 0xe2, 0x05, 0xa4, 0x79, 0xf7, 0x15, 0x5c, 0x42, 0x89, 0x3f, 0x66, 0x4a, 0x7c, 0x80, 0x97, 0x23, + 0x25, 0x02, 0x7b, 0x48, 0x02, 0x57, 0x68, 0xf1, 0xf9, 0x4d, 0x7c, 0x5d, 0x33, 0x8e, 0xd6, 0x1b, + 0x2d, 0x16, 0x7f, 0x67, 0x48, 0x5d, 0x2c, 0xed, 0xdd, 0x22, 0x75, 0xb1, 0xf4, 0x47, 0x8a, 0xb4, + 0xc5, 0xe2, 0xaf, 0x0a, 0x69, 
0x8b, 0x15, 0xf6, 0x6c, 0xfe, 0xff, 0x2c, 0x14, 0xb6, 0xf8, 0x37, + 0xe1, 0xc8, 0x85, 0x52, 0x58, 0xa6, 0x47, 0xab, 0x69, 0x75, 0xc6, 0xe8, 0x5a, 0xd3, 0xbc, 0x9d, + 0xd9, 0x2f, 0x14, 0x7a, 0x93, 0x29, 0xf4, 0x06, 0x5e, 0xa1, 0xc8, 0xe2, 0xb3, 0xf3, 0x16, 0x2f, + 0x66, 0xb5, 0xac, 0x7e, 0x9f, 0x1a, 0xe2, 0xcf, 0xa1, 0xa2, 0xd6, 0xd1, 0xd1, 0x9b, 0xa9, 0xb5, + 0x4d, 0xb5, 0x14, 0xdf, 0xc4, 0x57, 0xb1, 0x08, 0xe4, 0xb7, 0x18, 0xf2, 0x2a, 0xbe, 0x91, 0x82, + 0xec, 0x31, 0x56, 0x0d, 0x9c, 0xd7, 0xc0, 0xd3, 0xc1, 0xb5, 0x12, 0x7b, 0x3a, 0xb8, 0x5e, 0x42, + 0xbf, 0x12, 0x7c, 0xcc, 0x58, 0x29, 0xb8, 0x0f, 0x10, 0x55, 0xb2, 0x51, 0xaa, 0x2d, 0x95, 0x7b, + 0x5d, 0xdc, 0x39, 0x24, 0x8b, 0xe0, 0x18, 0x33, 0x58, 0xb1, 0xef, 0x62, 0xb0, 0x03, 0xdb, 0x0f, + 0xf8, 0xc1, 0xac, 0x6a, 0xa5, 0x69, 0x94, 0x3a, 0x1f, 0xbd, 0xbe, 0xdd, 0xbc, 0x73, 0x25, 0x8f, + 0x40, 0xbf, 0xcb, 0xd0, 0x6f, 0xe3, 0x66, 0x0a, 0xfa, 0x88, 0xf3, 0xd2, 0xcd, 0xf6, 0xe3, 0x02, + 0x94, 0x9f, 0x5a, 0xb6, 0x13, 0x10, 0xc7, 0x72, 0x7a, 0x04, 0x9d, 0xc0, 0x1c, 0x8b, 0xd4, 0x71, + 0x47, 0xac, 0x96, 0x6d, 0xe3, 0x8e, 0x58, 0xab, 0x69, 0xe2, 0x35, 0x06, 0xdc, 0xc4, 0xd7, 0x28, + 0xf0, 0x30, 0x12, 0xdd, 0x62, 0xa5, 0x48, 0x3a, 0xe9, 0x17, 0x30, 0x2f, 0x5e, 0xfb, 0x62, 0x82, + 0xb4, 0xe2, 0x4f, 0xf3, 0x66, 0x7a, 0x67, 0xda, 0x5e, 0x56, 0x61, 0x7c, 0xc6, 0x47, 0x71, 0x26, + 0x00, 0x51, 0x8d, 0x3d, 0xbe, 0xa2, 0x89, 0x92, 0x7c, 0x73, 0x2d, 0x9b, 0x21, 0xcd, 0xa6, 0x2a, + 0x66, 0x3f, 0xe4, 0xa5, 0xb8, 0x7f, 0x0a, 0xb3, 0x4f, 0x2c, 0xff, 0x0c, 0xc5, 0x62, 0xaf, 0xf2, + 0xad, 0x58, 0xb3, 0x99, 0xd6, 0x25, 0x50, 0x6e, 0x33, 0x94, 0x1b, 0xdc, 0x95, 0xa9, 0x28, 0x67, + 0x96, 0x7f, 0x26, 0xec, 0xc7, 0x3f, 0x1d, 0x8b, 0xdb, 0x4f, 0xfb, 0xfc, 0x2c, 0x6e, 0x3f, 0xfd, + 0x6b, 0xb3, 0x6c, 0xfb, 0x51, 0x94, 0xf3, 0x09, 0xc5, 0x19, 0x41, 0x51, 0x7e, 0xad, 0x85, 0x62, + 0x4f, 0xf7, 0xb1, 0x2f, 0xbb, 0x9a, 0xab, 0x59, 0xdd, 0x02, 0xed, 0x0e, 0x43, 0xbb, 0x85, 0x1b, + 0x89, 0xd5, 0x12, 0x9c, 0x0f, 0x8d, 0xfb, 0xef, 0x1b, 0xe8, 0x47, 0x00, 0xd1, 0xc3, 0x44, 0xe2, + 0x0c, 0xc6, 0xdf, 0x38, 0x12, 0x67, 0x30, 0xf1, 0xa6, 0x81, 0x37, 0x18, 0xee, 0x3a, 0xbe, 0x13, + 0xc7, 0x0d, 0x3c, 0xcb, 0xf1, 0x5f, 0x10, 0xef, 0x3d, 0x5e, 0x67, 0xf5, 0xcf, 0xec, 0x11, 0x9d, + 0xb2, 0x07, 0xa5, 0xb0, 0xee, 0x1c, 0xf7, 0xb7, 0xf1, 0x7a, 0x78, 0xdc, 0xdf, 0x26, 0x0a, 0xd6, + 0xba, 0xe3, 0xd1, 0xf6, 0x8b, 0x64, 0xa5, 0x47, 0xf0, 0x97, 0x75, 0x98, 0xa5, 0x79, 0x37, 0x4d, + 0x4f, 0xa2, 0xca, 0x49, 0x7c, 0xf6, 0x89, 0x3a, 0x6a, 0x7c, 0xf6, 0xc9, 0xa2, 0x8b, 0x9e, 0x9e, + 0xd0, 0x6b, 0x56, 0x8b, 0x17, 0x29, 0xe8, 0x4c, 0x5d, 0x28, 0x2b, 0xa5, 0x15, 0x94, 0x22, 0x4c, + 0x2f, 0xd0, 0xc6, 0x03, 0x5e, 0x4a, 0x5d, 0x06, 0xbf, 0xc1, 0xf0, 0xae, 0xf1, 0x80, 0xc7, 0xf0, + 0xfa, 0x9c, 0x83, 0x02, 0x8a, 0xd9, 0x89, 0x93, 0x9f, 0x32, 0x3b, 0xfd, 0xf4, 0xaf, 0x65, 0x33, + 0x64, 0xce, 0x2e, 0x3a, 0xfa, 0x2f, 0xa1, 0xa2, 0x16, 0x58, 0x50, 0x8a, 0xf2, 0xb1, 0xa2, 0x72, + 0x3c, 0x92, 0xa4, 0xd5, 0x67, 0x74, 0xdf, 0xc6, 0x20, 0x2d, 0x85, 0x8d, 0x02, 0x0f, 0xa0, 0x20, + 0x2a, 0x2e, 0x69, 0x26, 0xd5, 0x0b, 0xd0, 0x69, 0x26, 0x8d, 0x95, 0x6b, 0xf4, 0xfc, 0x99, 0x21, + 0xd2, 0x4b, 0xa5, 0x8c, 0xd6, 0x02, 0xed, 0x31, 0x09, 0xb2, 0xd0, 0xa2, 0x5a, 0x66, 0x16, 0x9a, + 0x72, 0xa1, 0xcf, 0x42, 0x3b, 0x25, 0x81, 0xf0, 0x07, 0xf2, 0xa2, 0x8c, 0x32, 0x84, 0xa9, 0x11, + 0x12, 0x5f, 0xc5, 0x92, 0x76, 0xbd, 0x89, 0x00, 0x65, 0x78, 0xbc, 0x00, 0x88, 0xea, 0x41, 0xf1, + 0x9c, 0x35, 0xb5, 0x0e, 0x1e, 0xcf, 0x59, 0xd3, 0x4b, 0x4a, 0xba, 0x8f, 0x8d, 0x70, 0xf9, 0xed, + 0x8a, 0x22, 0xff, 0xdc, 0x00, 0x94, 0x2c, 0x1d, 0xa1, 
0x07, 0xe9, 0xd2, 0x53, 0xab, 0xeb, 0xcd, + 0x77, 0x5f, 0x8f, 0x39, 0xcd, 0x21, 0x47, 0x2a, 0xf5, 0x18, 0xf7, 0xe8, 0x25, 0x55, 0xea, 0xc7, + 0x06, 0x54, 0xb5, 0xba, 0x13, 0xba, 0x97, 0xb1, 0xa6, 0xb1, 0xa2, 0x7b, 0xf3, 0xed, 0x57, 0xf2, + 0xa5, 0x25, 0xf3, 0xca, 0x0e, 0x90, 0xb7, 0x9a, 0x9f, 0x1a, 0x50, 0xd3, 0xeb, 0x54, 0x28, 0x43, + 0x76, 0xa2, 0x68, 0xdf, 0x5c, 0x7f, 0x35, 0xe3, 0xd5, 0xcb, 0x13, 0x5d, 0x68, 0x06, 0x50, 0x10, + 0x95, 0xad, 0xb4, 0x8d, 0xaf, 0x97, 0xfb, 0xd3, 0x36, 0x7e, 0xac, 0x2c, 0x96, 0xb2, 0xf1, 0x3d, + 0x77, 0x40, 0x94, 0x63, 0x26, 0x4a, 0x5f, 0x59, 0x68, 0x57, 0x1f, 0xb3, 0x58, 0xdd, 0x2c, 0x0b, + 0x2d, 0x3a, 0x66, 0xb2, 0xe6, 0x85, 0x32, 0x84, 0xbd, 0xe2, 0x98, 0xc5, 0x4b, 0x66, 0x29, 0xc7, + 0x8c, 0x01, 0x2a, 0xc7, 0x2c, 0xaa, 0x4e, 0xa5, 0x1d, 0xb3, 0xc4, 0xeb, 0x45, 0xda, 0x31, 0x4b, + 0x16, 0xb8, 0x52, 0xd6, 0x91, 0xe1, 0x6a, 0xc7, 0x6c, 0x29, 0xa5, 0x90, 0x85, 0xde, 0xcd, 0x30, + 0x62, 0xea, 0xa3, 0x48, 0xf3, 0xbd, 0xd7, 0xe4, 0xce, 0xdc, 0xe3, 0xdc, 0xfc, 0x72, 0x8f, 0xff, + 0x9d, 0x01, 0xcb, 0x69, 0x45, 0x30, 0x94, 0x81, 0x93, 0xf1, 0x98, 0xd2, 0xdc, 0x78, 0x5d, 0xf6, + 0xab, 0xad, 0x15, 0xee, 0xfa, 0x47, 0xf5, 0x7f, 0xfb, 0x72, 0xd5, 0xf8, 0xcf, 0x2f, 0x57, 0x8d, + 0xff, 0xf9, 0x72, 0xd5, 0xf8, 0xfb, 0xff, 0x5d, 0x9d, 0x39, 0x99, 0x67, 0xff, 0xd5, 0xf8, 0xdb, + 0xbf, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x8b, 0x54, 0x11, 0xdf, 0xef, 0x3c, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// KVClient is the client API for KV service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KVClient interface { + // Range gets the keys in the range from the key-value store. + Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. + Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) + // Txn processes multiple requests in a single transaction. + // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. + Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) + // Compact compacts the event history in the etcd key-value store. The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. 
+ Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) +} + +type kVClient struct { + cc *grpc.ClientConn +} + +func NewKVClient(cc *grpc.ClientConn) KVClient { + return &kVClient{cc} +} + +func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) { + out := new(RangeResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { + out := new(PutResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) { + out := new(DeleteRangeResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) { + out := new(TxnResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) { + out := new(CompactionResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KVServer is the server API for KV service. +type KVServer interface { + // Range gets the keys in the range from the key-value store. + Range(context.Context, *RangeRequest) (*RangeResponse, error) + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. + Put(context.Context, *PutRequest) (*PutResponse, error) + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error) + // Txn processes multiple requests in a single transaction. + // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. + Txn(context.Context, *TxnRequest) (*TxnResponse, error) + // Compact compacts the event history in the etcd key-value store. The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. + Compact(context.Context, *CompactionRequest) (*CompactionResponse, error) +} + +// UnimplementedKVServer can be embedded to have forward compatible implementations. 
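+// For example (a sketch; myKV is a hypothetical implementation):
+//
+//	type myKV struct {
+//		UnimplementedKVServer
+//	}
+//
+//	// Only the methods myKV overrides do real work; the embedded stubs
+//	// answer every other KV RPC with codes.Unimplemented, so the type
+//	// keeps compiling as new methods are added to the service.
+//	func (s *myKV) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) {
+//		return &RangeResponse{}, nil
+//	}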
+type UnimplementedKVServer struct { +} + +func (*UnimplementedKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Range not implemented") +} +func (*UnimplementedKVServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Put not implemented") +} +func (*UnimplementedKVServer) DeleteRange(ctx context.Context, req *DeleteRangeRequest) (*DeleteRangeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteRange not implemented") +} +func (*UnimplementedKVServer) Txn(ctx context.Context, req *TxnRequest) (*TxnResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Txn not implemented") +} +func (*UnimplementedKVServer) Compact(ctx context.Context, req *CompactionRequest) (*CompactionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented") +} + +func RegisterKVServer(s *grpc.Server, srv KVServer) { + s.RegisterService(&_KV_serviceDesc, srv) +} + +func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RangeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Range(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Range", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Range(ctx, req.(*RangeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Put(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Put", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Put(ctx, req.(*PutRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRangeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).DeleteRange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/DeleteRange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TxnRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Txn(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Txn", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Txn(ctx, req.(*TxnRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CompactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Compact(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Compact", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Compact(ctx, req.(*CompactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _KV_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.KV", + HandlerType: (*KVServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Range", + Handler: _KV_Range_Handler, + }, + { + MethodName: "Put", + Handler: _KV_Put_Handler, + }, + { + MethodName: "DeleteRange", + Handler: _KV_DeleteRange_Handler, + }, + { + MethodName: "Txn", + Handler: _KV_Txn_Handler, + }, + { + MethodName: "Compact", + Handler: _KV_Compact_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +// WatchClient is the client API for Watch service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type WatchClient interface { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) +} + +type watchClient struct { + cc *grpc.ClientConn +} + +func NewWatchClient(cc *grpc.ClientConn) WatchClient { + return &watchClient{cc} +} + +func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Watch_serviceDesc.Streams[0], "/etcdserverpb.Watch/Watch", opts...) + if err != nil { + return nil, err + } + x := &watchWatchClient{stream} + return x, nil +} + +type Watch_WatchClient interface { + Send(*WatchRequest) error + Recv() (*WatchResponse, error) + grpc.ClientStream +} + +type watchWatchClient struct { + grpc.ClientStream +} + +func (x *watchWatchClient) Send(m *WatchRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *watchWatchClient) Recv() (*WatchResponse, error) { + m := new(WatchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// WatchServer is the server API for Watch service. +type WatchServer interface { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + Watch(Watch_WatchServer) error +} + +// UnimplementedWatchServer can be embedded to have forward compatible implementations. 
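+// A server-side sketch of the bidirectional contract described above, in
+// which create/cancel requests arrive on the receive side of the stream and
+// events go back on the send side (hypothetical; a real implementation
+// would track and multiplex watchers):
+//
+//	func (s *myWatch) Watch(stream Watch_WatchServer) error {
+//		for {
+//			req, err := stream.Recv() // next create/cancel request from the client
+//			if err != nil {
+//				return err
+//			}
+//			_ = req // register or cancel a watcher based on req, then push events:
+//			if err := stream.Send(&WatchResponse{}); err != nil {
+//				return err
+//			}
+//		}
+//	}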
+type UnimplementedWatchServer struct { +} + +func (*UnimplementedWatchServer) Watch(srv Watch_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} + +func RegisterWatchServer(s *grpc.Server, srv WatchServer) { + s.RegisterService(&_Watch_serviceDesc, srv) +} + +func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WatchServer).Watch(&watchWatchServer{stream}) +} + +type Watch_WatchServer interface { + Send(*WatchResponse) error + Recv() (*WatchRequest, error) + grpc.ServerStream +} + +type watchWatchServer struct { + grpc.ServerStream +} + +func (x *watchWatchServer) Send(m *WatchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *watchWatchServer) Recv() (*WatchRequest, error) { + m := new(WatchRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Watch_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Watch", + HandlerType: (*WatchServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Watch_Watch_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// LeaseClient is the client API for Lease service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LeaseClient interface { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. + LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. + LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) + // LeaseTimeToLive retrieves lease information. + LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) + // LeaseLeases lists all existing leases. + LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) +} + +type leaseClient struct { + cc *grpc.ClientConn +} + +func NewLeaseClient(cc *grpc.ClientConn) LeaseClient { + return &leaseClient{cc} +} + +func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) { + out := new(LeaseGrantResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) { + out := new(LeaseRevokeResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) { + stream, err := c.cc.NewStream(ctx, &_Lease_serviceDesc.Streams[0], "/etcdserverpb.Lease/LeaseKeepAlive", opts...) + if err != nil { + return nil, err + } + x := &leaseLeaseKeepAliveClient{stream} + return x, nil +} + +type Lease_LeaseKeepAliveClient interface { + Send(*LeaseKeepAliveRequest) error + Recv() (*LeaseKeepAliveResponse, error) + grpc.ClientStream +} + +type leaseLeaseKeepAliveClient struct { + grpc.ClientStream +} + +func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) { + m := new(LeaseKeepAliveResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) { + out := new(LeaseTimeToLiveResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) { + out := new(LeaseLeasesResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LeaseServer is the server API for Lease service. +type LeaseServer interface { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. + LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error) + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error) + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. + LeaseKeepAlive(Lease_LeaseKeepAliveServer) error + // LeaseTimeToLive retrieves lease information. + LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) + // LeaseLeases lists all existing leases. + LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error) +} + +// UnimplementedLeaseServer can be embedded to have forward compatible implementations. 
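+//
+// A sketch of a partial implementation, assuming a hypothetical leaseStub
+// type: only LeaseGrant is overridden, and the remaining Lease RPCs fall
+// back to the embedded Unimplemented methods.
+//
+//	type leaseStub struct {
+//		UnimplementedLeaseServer
+//	}
+//
+//	func (s *leaseStub) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) {
+//		// Echo the requested ID and TTL back; illustration only.
+//		return &LeaseGrantResponse{ID: req.ID, TTL: req.TTL}, nil
+//	}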
+type UnimplementedLeaseServer struct { +} + +func (*UnimplementedLeaseServer) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LeaseGrant not implemented") +} +func (*UnimplementedLeaseServer) LeaseRevoke(ctx context.Context, req *LeaseRevokeRequest) (*LeaseRevokeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LeaseRevoke not implemented") +} +func (*UnimplementedLeaseServer) LeaseKeepAlive(srv Lease_LeaseKeepAliveServer) error { + return status.Errorf(codes.Unimplemented, "method LeaseKeepAlive not implemented") +} +func (*UnimplementedLeaseServer) LeaseTimeToLive(ctx context.Context, req *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LeaseTimeToLive not implemented") +} +func (*UnimplementedLeaseServer) LeaseLeases(ctx context.Context, req *LeaseLeasesRequest) (*LeaseLeasesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LeaseLeases not implemented") +} + +func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) { + s.RegisterService(&_Lease_serviceDesc, srv) +} + +func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseGrantRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseGrant(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseGrant", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseRevokeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseRevoke(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseRevoke", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream}) +} + +type Lease_LeaseKeepAliveServer interface { + Send(*LeaseKeepAliveResponse) error + Recv() (*LeaseKeepAliveRequest, error) + grpc.ServerStream +} + +type leaseLeaseKeepAliveServer struct { + grpc.ServerStream +} + +func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) { + m := new(LeaseKeepAliveRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseTimeToLiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseTimeToLive(ctx, in) + } + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseLeasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseLeases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseLeases", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Lease_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Lease", + HandlerType: (*LeaseServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LeaseGrant", + Handler: _Lease_LeaseGrant_Handler, + }, + { + MethodName: "LeaseRevoke", + Handler: _Lease_LeaseRevoke_Handler, + }, + { + MethodName: "LeaseTimeToLive", + Handler: _Lease_LeaseTimeToLive_Handler, + }, + { + MethodName: "LeaseLeases", + Handler: _Lease_LeaseLeases_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "LeaseKeepAlive", + Handler: _Lease_LeaseKeepAlive_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// ClusterClient is the client API for Cluster service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ClusterClient interface { + // MemberAdd adds a member into the cluster. + MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) + // MemberRemove removes an existing member from the cluster. + MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) + // MemberUpdate updates the member configuration. + MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) + // MemberList lists all the members in the cluster. + MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) +} + +type clusterClient struct { + cc *grpc.ClientConn +} + +func NewClusterClient(cc *grpc.ClientConn) ClusterClient { + return &clusterClient{cc} +} + +func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) { + out := new(MemberAddResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) { + out := new(MemberRemoveResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) { + out := new(MemberUpdateResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) { + out := new(MemberListResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) { + out := new(MemberPromoteResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberPromote", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ClusterServer is the server API for Cluster service. +type ClusterServer interface { + // MemberAdd adds a member into the cluster. + MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error) + // MemberRemove removes an existing member from the cluster. + MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error) + // MemberUpdate updates the member configuration. + MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error) + // MemberList lists all the members in the cluster. + MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error) + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(context.Context, *MemberPromoteRequest) (*MemberPromoteResponse, error) +} + +// UnimplementedClusterServer can be embedded to have forward compatible implementations. 
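+//
+// Registration is identical to registering a full implementation; a bare
+// embed already satisfies ClusterServer (grpcServer and clusterStub are
+// hypothetical names):
+//
+//	type clusterStub struct{ UnimplementedClusterServer }
+//
+//	RegisterClusterServer(grpcServer, &clusterStub{})
+//
+// Every Cluster RPC on such a server then responds with codes.Unimplemented
+// until a real method is added.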
+type UnimplementedClusterServer struct { +} + +func (*UnimplementedClusterServer) MemberAdd(ctx context.Context, req *MemberAddRequest) (*MemberAddResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MemberAdd not implemented") +} +func (*UnimplementedClusterServer) MemberRemove(ctx context.Context, req *MemberRemoveRequest) (*MemberRemoveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MemberRemove not implemented") +} +func (*UnimplementedClusterServer) MemberUpdate(ctx context.Context, req *MemberUpdateRequest) (*MemberUpdateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MemberUpdate not implemented") +} +func (*UnimplementedClusterServer) MemberList(ctx context.Context, req *MemberListRequest) (*MemberListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MemberList not implemented") +} +func (*UnimplementedClusterServer) MemberPromote(ctx context.Context, req *MemberPromoteRequest) (*MemberPromoteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MemberPromote not implemented") +} + +func RegisterClusterServer(s *grpc.Server, srv ClusterServer) { + s.RegisterService(&_Cluster_serviceDesc, srv) +} + +func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberRemoveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberRemove(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberRemove", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberUpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberUpdate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberUpdate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: 
srv, + FullMethod: "/etcdserverpb.Cluster/MemberList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberPromote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberPromoteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberPromote(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberPromote", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberPromote(ctx, req.(*MemberPromoteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Cluster_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Cluster", + HandlerType: (*ClusterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "MemberAdd", + Handler: _Cluster_MemberAdd_Handler, + }, + { + MethodName: "MemberRemove", + Handler: _Cluster_MemberRemove_Handler, + }, + { + MethodName: "MemberUpdate", + Handler: _Cluster_MemberUpdate_Handler, + }, + { + MethodName: "MemberList", + Handler: _Cluster_MemberList_Handler, + }, + { + MethodName: "MemberPromote", + Handler: _Cluster_MemberPromote_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +// MaintenanceClient is the client API for Maintenance service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MaintenanceClient interface { + // Alarm activates, deactivates, and queries alarms regarding cluster health. + Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) + // Status gets the status of the member. + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) + // Defragment defragments a member's backend database to recover storage space. + Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. + Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) + // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. + HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) + // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. + Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) + // MoveLeader requests current leader node to transfer its leadership to transferee. + MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) + // Downgrade requests downgrades, verifies feasibility or cancels downgrade + // on the cluster version. + // Supported since etcd 3.5. 
+ Downgrade(ctx context.Context, in *DowngradeRequest, opts ...grpc.CallOption) (*DowngradeResponse, error) +} + +type maintenanceClient struct { + cc *grpc.ClientConn +} + +func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient { + return &maintenanceClient{cc} +} + +func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) { + out := new(AlarmResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) { + out := new(DefragmentResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) { + out := new(HashResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) { + out := new(HashKVResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) { + stream, err := c.cc.NewStream(ctx, &_Maintenance_serviceDesc.Streams[0], "/etcdserverpb.Maintenance/Snapshot", opts...) + if err != nil { + return nil, err + } + x := &maintenanceSnapshotClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Maintenance_SnapshotClient interface { + Recv() (*SnapshotResponse, error) + grpc.ClientStream +} + +type maintenanceSnapshotClient struct { + grpc.ClientStream +} + +func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { + m := new(SnapshotResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) { + out := new(MoveLeaderResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Downgrade(ctx context.Context, in *DowngradeRequest, opts ...grpc.CallOption) (*DowngradeResponse, error) { + out := new(DowngradeResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Downgrade", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MaintenanceServer is the server API for Maintenance service. +type MaintenanceServer interface { + // Alarm activates, deactivates, and queries alarms regarding cluster health. 
+ Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error) + // Status gets the status of the member. + Status(context.Context, *StatusRequest) (*StatusResponse, error) + // Defragment defragments a member's backend database to recover storage space. + Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error) + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. + Hash(context.Context, *HashRequest) (*HashResponse, error) + // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. + HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error) + // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. + Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error + // MoveLeader requests current leader node to transfer its leadership to transferee. + MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error) + // Downgrade requests downgrades, verifies feasibility or cancels downgrade + // on the cluster version. + // Supported since etcd 3.5. + Downgrade(context.Context, *DowngradeRequest) (*DowngradeResponse, error) +} + +// UnimplementedMaintenanceServer can be embedded to have forward compatible implementations. +type UnimplementedMaintenanceServer struct { +} + +func (*UnimplementedMaintenanceServer) Alarm(ctx context.Context, req *AlarmRequest) (*AlarmResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Alarm not implemented") +} +func (*UnimplementedMaintenanceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +} +func (*UnimplementedMaintenanceServer) Defragment(ctx context.Context, req *DefragmentRequest) (*DefragmentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Defragment not implemented") +} +func (*UnimplementedMaintenanceServer) Hash(ctx context.Context, req *HashRequest) (*HashResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Hash not implemented") +} +func (*UnimplementedMaintenanceServer) HashKV(ctx context.Context, req *HashKVRequest) (*HashKVResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method HashKV not implemented") +} +func (*UnimplementedMaintenanceServer) Snapshot(req *SnapshotRequest, srv Maintenance_SnapshotServer) error { + return status.Errorf(codes.Unimplemented, "method Snapshot not implemented") +} +func (*UnimplementedMaintenanceServer) MoveLeader(ctx context.Context, req *MoveLeaderRequest) (*MoveLeaderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MoveLeader not implemented") +} +func (*UnimplementedMaintenanceServer) Downgrade(ctx context.Context, req *DowngradeRequest) (*DowngradeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Downgrade not implemented") +} + +func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { + s.RegisterService(&_Maintenance_serviceDesc, srv) +} + +func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(AlarmRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Alarm(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Alarm", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DefragmentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Defragment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Defragment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HashRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Hash(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Hash", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HashKVRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).HashKV(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/HashKV", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SnapshotRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream}) +} + +type Maintenance_SnapshotServer interface { + Send(*SnapshotResponse) error + grpc.ServerStream +} + +type maintenanceSnapshotServer struct { + grpc.ServerStream +} + +func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error { + return x.ServerStream.SendMsg(m) +} + +func 
_Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MoveLeaderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).MoveLeader(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/MoveLeader", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Downgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DowngradeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Downgrade(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Downgrade", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Downgrade(ctx, req.(*DowngradeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Maintenance_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Maintenance", + HandlerType: (*MaintenanceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Alarm", + Handler: _Maintenance_Alarm_Handler, + }, + { + MethodName: "Status", + Handler: _Maintenance_Status_Handler, + }, + { + MethodName: "Defragment", + Handler: _Maintenance_Defragment_Handler, + }, + { + MethodName: "Hash", + Handler: _Maintenance_Hash_Handler, + }, + { + MethodName: "HashKV", + Handler: _Maintenance_HashKV_Handler, + }, + { + MethodName: "MoveLeader", + Handler: _Maintenance_MoveLeader_Handler, + }, + { + MethodName: "Downgrade", + Handler: _Maintenance_Downgrade_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Snapshot", + Handler: _Maintenance_Snapshot_Handler, + ServerStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// AuthClient is the client API for Auth service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type AuthClient interface { + // AuthEnable enables authentication. + AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) + // AuthDisable disables authentication. + AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) + // AuthStatus displays authentication status. + AuthStatus(ctx context.Context, in *AuthStatusRequest, opts ...grpc.CallOption) (*AuthStatusResponse, error) + // Authenticate processes an authenticate request. + Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) + // UserAdd adds a new user. User name cannot be empty. + UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) + // UserGet gets detailed user information. + UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) + // UserList gets a list of all users. + UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) + // UserDelete deletes a specified user. 
+ UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) + // UserChangePassword changes the password of a specified user. + UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) + // UserGrant grants a role to a specified user. + UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) + // UserRevokeRole revokes a role of specified user. + UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) + // RoleAdd adds a new role. Role name cannot be empty. + RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) + // RoleGet gets detailed role information. + RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) + // RoleList gets lists of all roles. + RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) + // RoleDelete deletes a specified role. + RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) + // RoleGrantPermission grants a permission of a specified key or range to a specified role. + RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) + // RoleRevokePermission revokes a key or range permission of a specified role. + RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) +} + +type authClient struct { + cc *grpc.ClientConn +} + +func NewAuthClient(cc *grpc.ClientConn) AuthClient { + return &authClient{cc} +} + +func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) { + out := new(AuthEnableResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) { + out := new(AuthDisableResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) AuthStatus(ctx context.Context, in *AuthStatusRequest, opts ...grpc.CallOption) (*AuthStatusResponse, error) { + out := new(AuthStatusResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) { + out := new(AuthenticateResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) { + out := new(AuthUserAddResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) { + out := new(AuthUserGetResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) { + out := new(AuthUserListResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) { + out := new(AuthUserDeleteResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) { + out := new(AuthUserChangePasswordResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) { + out := new(AuthUserGrantRoleResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) { + out := new(AuthUserRevokeRoleResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) { + out := new(AuthRoleAddResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) { + out := new(AuthRoleGetResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) { + out := new(AuthRoleListResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) { + out := new(AuthRoleDeleteResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) { + out := new(AuthRoleGrantPermissionResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) { + out := new(AuthRoleRevokePermissionResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AuthServer is the server API for Auth service. +type AuthServer interface { + // AuthEnable enables authentication. + AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error) + // AuthDisable disables authentication. + AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error) + // AuthStatus displays authentication status. + AuthStatus(context.Context, *AuthStatusRequest) (*AuthStatusResponse, error) + // Authenticate processes an authenticate request. + Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) + // UserAdd adds a new user. User name cannot be empty. + UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error) + // UserGet gets detailed user information. + UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error) + // UserList gets a list of all users. + UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error) + // UserDelete deletes a specified user. + UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) + // UserChangePassword changes the password of a specified user. + UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) + // UserGrant grants a role to a specified user. + UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) + // UserRevokeRole revokes a role of specified user. + UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) + // RoleAdd adds a new role. Role name cannot be empty. + RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error) + // RoleGet gets detailed role information. + RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error) + // RoleList gets lists of all roles. + RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error) + // RoleDelete deletes a specified role. + RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) + // RoleGrantPermission grants a permission of a specified key or range to a specified role. + RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) + // RoleRevokePermission revokes a key or range permission of a specified role. + RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) +} + +// UnimplementedAuthServer can be embedded to have forward compatible implementations. 
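+//
+// A compile-time assertion, shown as a sketch, can guard against this struct
+// drifting out of sync with the AuthServer interface (the blank var is a
+// common Go idiom, not generated code):
+//
+//	var _ AuthServer = (*UnimplementedAuthServer)(nil)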
+type UnimplementedAuthServer struct { +} + +func (*UnimplementedAuthServer) AuthEnable(ctx context.Context, req *AuthEnableRequest) (*AuthEnableResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AuthEnable not implemented") +} +func (*UnimplementedAuthServer) AuthDisable(ctx context.Context, req *AuthDisableRequest) (*AuthDisableResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AuthDisable not implemented") +} +func (*UnimplementedAuthServer) AuthStatus(ctx context.Context, req *AuthStatusRequest) (*AuthStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AuthStatus not implemented") +} +func (*UnimplementedAuthServer) Authenticate(ctx context.Context, req *AuthenticateRequest) (*AuthenticateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Authenticate not implemented") +} +func (*UnimplementedAuthServer) UserAdd(ctx context.Context, req *AuthUserAddRequest) (*AuthUserAddResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UserAdd not implemented") +} +func (*UnimplementedAuthServer) UserGet(ctx context.Context, req *AuthUserGetRequest) (*AuthUserGetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UserGet not implemented") +} +func (*UnimplementedAuthServer) UserList(ctx context.Context, req *AuthUserListRequest) (*AuthUserListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UserList not implemented") +} +func (*UnimplementedAuthServer) UserDelete(ctx context.Context, req *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UserDelete not implemented") +} +func (*UnimplementedAuthServer) UserChangePassword(ctx context.Context, req *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UserChangePassword not implemented") +} +func (*UnimplementedAuthServer) UserGrantRole(ctx context.Context, req *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UserGrantRole not implemented") +} +func (*UnimplementedAuthServer) UserRevokeRole(ctx context.Context, req *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UserRevokeRole not implemented") +} +func (*UnimplementedAuthServer) RoleAdd(ctx context.Context, req *AuthRoleAddRequest) (*AuthRoleAddResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RoleAdd not implemented") +} +func (*UnimplementedAuthServer) RoleGet(ctx context.Context, req *AuthRoleGetRequest) (*AuthRoleGetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RoleGet not implemented") +} +func (*UnimplementedAuthServer) RoleList(ctx context.Context, req *AuthRoleListRequest) (*AuthRoleListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RoleList not implemented") +} +func (*UnimplementedAuthServer) RoleDelete(ctx context.Context, req *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RoleDelete not implemented") +} +func (*UnimplementedAuthServer) RoleGrantPermission(ctx context.Context, req *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RoleGrantPermission not implemented") +} +func 
(*UnimplementedAuthServer) RoleRevokePermission(ctx context.Context, req *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RoleRevokePermission not implemented") +} + +func RegisterAuthServer(s *grpc.Server, srv AuthServer) { + s.RegisterService(&_Auth_serviceDesc, srv) +} + +func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthEnableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).AuthEnable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthEnable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthDisableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).AuthDisable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthDisable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_AuthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).AuthStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthStatus(ctx, req.(*AuthStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthenticateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).Authenticate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/Authenticate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserGet_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserChangePasswordRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserChangePassword(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserChangePassword", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserGrantRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserGrantRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserGrantRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserRevokeRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserRevokeRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/etcdserverpb.Auth/UserRevokeRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleGrantPermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleGrantPermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleGrantPermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleRevokePermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleRevokePermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleRevokePermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Auth_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Auth", + HandlerType: (*AuthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AuthEnable", + Handler: _Auth_AuthEnable_Handler, + }, + { + MethodName: "AuthDisable", + Handler: _Auth_AuthDisable_Handler, + }, + { + MethodName: "AuthStatus", + Handler: _Auth_AuthStatus_Handler, + }, + { + MethodName: "Authenticate", + Handler: _Auth_Authenticate_Handler, + }, + { + MethodName: "UserAdd", + Handler: _Auth_UserAdd_Handler, + }, + { + MethodName: "UserGet", + Handler: _Auth_UserGet_Handler, + }, + { + MethodName: "UserList", + Handler: _Auth_UserList_Handler, + }, + { + MethodName: "UserDelete", + Handler: _Auth_UserDelete_Handler, + }, + { + MethodName: "UserChangePassword", + Handler: _Auth_UserChangePassword_Handler, + }, + { + MethodName: "UserGrantRole", + Handler: _Auth_UserGrantRole_Handler, + }, + { + MethodName: "UserRevokeRole", + Handler: _Auth_UserRevokeRole_Handler, + }, + { + MethodName: "RoleAdd", + Handler: _Auth_RoleAdd_Handler, + }, + { + MethodName: "RoleGet", + Handler: _Auth_RoleGet_Handler, + }, + { + MethodName: "RoleList", + Handler: _Auth_RoleList_Handler, + }, + { + MethodName: "RoleDelete", + Handler: _Auth_RoleDelete_Handler, + }, + { + MethodName: "RoleGrantPermission", + Handler: _Auth_RoleGrantPermission_Handler, + }, + { + MethodName: "RoleRevokePermission", + Handler: _Auth_RoleRevokePermission_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.RaftTerm != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) + i-- + dAtA[i] = 0x20 + } + if m.Revision != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + } + if m.MemberId != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MemberId)) + i-- + dAtA[i] = 0x10 + } + if m.ClusterId != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RangeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ 
= i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MaxCreateRevision != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision)) + i-- + dAtA[i] = 0x68 + } + if m.MinCreateRevision != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision)) + i-- + dAtA[i] = 0x60 + } + if m.MaxModRevision != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision)) + i-- + dAtA[i] = 0x58 + } + if m.MinModRevision != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision)) + i-- + dAtA[i] = 0x50 + } + if m.CountOnly { + i-- + if m.CountOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.KeysOnly { + i-- + if m.KeysOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.Serializable { + i-- + if m.Serializable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.SortTarget != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget)) + i-- + dAtA[i] = 0x30 + } + if m.SortOrder != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder)) + i-- + dAtA[i] = 0x28 + } + if m.Revision != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x20 + } + if m.Limit != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x18 + } + if len(m.RangeEnd) > 0 { + i -= len(m.RangeEnd) + copy(dAtA[i:], m.RangeEnd) + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RangeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Count != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x20 + } + if m.More { + i-- + if m.More { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Kvs) > 0 { + for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PutRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PutRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if 
m.IgnoreLease { + i-- + if m.IgnoreLease { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.IgnoreValue { + i-- + if m.IgnoreValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.PrevKv { + i-- + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Lease != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + i-- + dAtA[i] = 0x18 + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PutResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.PrevKv != nil { + { + size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.PrevKv { + i-- + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.RangeEnd) > 0 { + i -= len(m.RangeEnd) + copy(dAtA[i:], m.RangeEnd) + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteRangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PrevKvs) > 0 { + for iNdEx := len(m.PrevKvs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.PrevKvs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Deleted != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Deleted)) + i-- + dAtA[i] = 0x10 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Request != nil { + { + size := m.Request.Size() + i -= size + if _, err := m.Request.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestOp_RequestRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestRange != nil { + { + size, err := m.RequestRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestOp_RequestPut) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestPut != nil { + { + size, err := m.RequestPut.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestOp_RequestDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestDeleteRange != nil { + { + size, err := m.RequestDeleteRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestOp_RequestTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestTxn != nil { + { + size, err := m.RequestTxn.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *ResponseOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Response != nil { + { + size := m.Response.Size() + i -= size + if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseOp_ResponseRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ResponseRange != nil { + { + size, err := m.ResponseRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseOp_ResponsePut) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ResponsePut != nil { + { + size, err := m.ResponsePut.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseOp_ResponseDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ResponseDeleteRange != nil { + { + size, err := m.ResponseDeleteRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseOp_ResponseTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ResponseTxn != nil { + { + size, err := m.ResponseTxn.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Compare) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Compare) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Compare) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.RangeEnd) > 0 { + i -= len(m.RangeEnd) + copy(dAtA[i:], m.RangeEnd) + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i-- + dAtA[i] = 0x4 + i-- + dAtA[i] = 0x82 + } + if m.TargetUnion != nil { + { + size := m.TargetUnion.Size() + i -= size + if _, err := m.TargetUnion.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x1a + } + if m.Target != 0 { + i = 
encodeVarintRpc(dAtA, i, uint64(m.Target)) + i-- + dAtA[i] = 0x10 + } + if m.Result != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Result)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Compare_Version) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintRpc(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Compare_CreateRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision)) + i-- + dAtA[i] = 0x28 + return len(dAtA) - i, nil +} +func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Compare_ModRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Compare_Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Value != nil { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Compare_Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + i-- + dAtA[i] = 0x40 + return len(dAtA) - i, nil +} +func (m *TxnRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TxnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Failure) > 0 { + for iNdEx := len(m.Failure) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Failure[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Success) > 0 { + for iNdEx := len(m.Success) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Success[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Compare) > 0 { + for iNdEx := len(m.Compare) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Compare[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TxnResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + 
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TxnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Responses) > 0 { + for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Succeeded { + i-- + if m.Succeeded { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CompactionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Physical { + i-- + if m.Physical { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Revision != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CompactionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HashRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *HashKVRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HashKVRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Revision != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HashKVResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CompactRevision != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) + i-- + dAtA[i] = 0x18 + } + if m.Hash != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) + i-- + dAtA[i] = 0x10 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HashResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Hash != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) + i-- + dAtA[i] = 0x10 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = 
l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Blob) > 0 { + i -= len(m.Blob) + copy(dAtA[i:], m.Blob) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob))) + i-- + dAtA[i] = 0x1a + } + if m.RemainingBytes != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes)) + i-- + dAtA[i] = 0x10 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WatchRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.RequestUnion != nil { + { + size := m.RequestUnion.Size() + i -= size + if _, err := m.RequestUnion.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchRequest_CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CreateRequest != nil { + { + size, err := m.CreateRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchRequest_CancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CancelRequest != nil { + { + size, err := m.CancelRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchRequest_ProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProgressRequest != nil { + { + size, err := m.ProgressRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Fragment { + i-- + if m.Fragment { + dAtA[i] = 1 + } 
else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.WatchId != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + i-- + dAtA[i] = 0x38 + } + if m.PrevKv { + i-- + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.Filters) > 0 { + dAtA22 := make([]byte, len(m.Filters)*10) + var j21 int + for _, num := range m.Filters { + for num >= 1<<7 { + dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j21++ + } + dAtA22[j21] = uint8(num) + j21++ + } + i -= j21 + copy(dAtA[i:], dAtA22[:j21]) + i = encodeVarintRpc(dAtA, i, uint64(j21)) + i-- + dAtA[i] = 0x2a + } + if m.ProgressNotify { + i-- + if m.ProgressNotify { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.StartRevision != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision)) + i-- + dAtA[i] = 0x18 + } + if len(m.RangeEnd) > 0 { + i -= len(m.RangeEnd) + copy(dAtA[i:], m.RangeEnd) + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchCancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.WatchId != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *WatchResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if m.Fragment { + i-- + if m.Fragment { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.CancelReason) > 0 { 
+ i -= len(m.CancelReason) + copy(dAtA[i:], m.CancelReason) + i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) + i-- + dAtA[i] = 0x32 + } + if m.CompactRevision != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) + i-- + dAtA[i] = 0x28 + } + if m.Canceled { + i-- + if m.Canceled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Created { + i-- + if m.Created { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.WatchId != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + i-- + dAtA[i] = 0x10 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseGrantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x10 + } + if m.TTL != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseGrantResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + } + if m.TTL != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + i-- + dAtA[i] = 0x18 + } + if m.ID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x10 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseRevokeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LeaseRevokeResponse) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseRevokeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LeaseCheckpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCheckpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCheckpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Remaining_TTL != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Remaining_TTL)) + i-- + dAtA[i] = 0x10 + } + if m.ID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LeaseCheckpointRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCheckpointRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCheckpointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Checkpoints) > 0 { + for iNdEx := len(m.Checkpoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Checkpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LeaseCheckpointResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCheckpointResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCheckpointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseKeepAliveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseKeepAliveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TTL != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + i-- + dAtA[i] = 0x18 + } + if m.ID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x10 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseTimeToLiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Keys { + i-- + if m.Keys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.ID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseTimeToLiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Keys) > 0 { + for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Keys[iNdEx]) + copy(dAtA[i:], m.Keys[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Keys[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.GrantedTTL != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL)) + i-- + dAtA[i] = 0x20 + } + if m.TTL != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + i-- + dAtA[i] = 0x18 + } + if m.ID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + 
i-- + dAtA[i] = 0x10 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *LeaseStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Leases) > 0 { + for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Member) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Member) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.IsLearner { + i-- + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.ClientURLs) > 0 { + for iNdEx := len(m.ClientURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClientURLs[iNdEx]) + copy(dAtA[i:], m.ClientURLs[iNdEx]) + i = 
encodeVarintRpc(dAtA, i, uint64(len(m.ClientURLs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.PeerURLs) > 0 { + for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PeerURLs[iNdEx]) + copy(dAtA[i:], m.PeerURLs[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.ID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemberAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.IsLearner { + i-- + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.PeerURLs) > 0 { + for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PeerURLs[iNdEx]) + copy(dAtA[i:], m.PeerURLs[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemberAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Members) > 0 { + for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Member != nil { + { + size, err := m.Member.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemberRemoveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ID != 0 { + i = encodeVarintRpc(dAtA, i, 
uint64(m.ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemberRemoveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Members) > 0 { + for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemberUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PeerURLs) > 0 { + for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PeerURLs[iNdEx]) + copy(dAtA[i:], m.PeerURLs[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.ID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemberUpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Members) > 0 { + for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MemberListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberListRequest) MarshalTo(dAtA 
[]byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemberListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Linearizable { + i-- + if m.Linearizable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemberListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemberListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Members) > 0 { + for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MemberPromoteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberPromoteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemberPromoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemberPromoteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberPromoteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemberPromoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Members) > 0 { + for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DefragmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DefragmentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MoveLeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TargetID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.TargetID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MoveLeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AlarmRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AlarmRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Alarm != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) + 
i-- + dAtA[i] = 0x18 + } + if m.MemberID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) + i-- + dAtA[i] = 0x10 + } + if m.Action != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AlarmMember) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AlarmMember) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Alarm != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) + i-- + dAtA[i] = 0x10 + } + if m.MemberID != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AlarmResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AlarmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Alarms) > 0 { + for iNdEx := len(m.Alarms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Alarms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DowngradeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DowngradeRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DowngradeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + } + if m.Action != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DowngradeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DowngradeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DowngradeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { 
+ i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.IsLearner { + i-- + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.DbSizeInUse != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeInUse)) + i-- + dAtA[i] = 0x48 + } + if len(m.Errors) > 0 { + for iNdEx := len(m.Errors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Errors[iNdEx]) + copy(dAtA[i:], m.Errors[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Errors[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.RaftAppliedIndex != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.RaftAppliedIndex)) + i-- + dAtA[i] = 0x38 + } + if m.RaftTerm != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) + i-- + dAtA[i] = 0x30 + } + if m.RaftIndex != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex)) + i-- + dAtA[i] = 0x28 + } + if m.Leader != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Leader)) + i-- + dAtA[i] = 0x20 + } + if m.DbSize != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.DbSize)) + i-- + dAtA[i] = 0x18 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthEnableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != 
nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthDisableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *AuthStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthStatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Password) > 0 { + i -= len(m.Password) + copy(dAtA[i:], m.Password) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.HashedPassword) > 0 { + i -= len(m.HashedPassword) + copy(dAtA[i:], m.HashedPassword) + i = encodeVarintRpc(dAtA, i, uint64(len(m.HashedPassword))) + i-- + dAtA[i] = 0x22 + } + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Password) > 0 { + i -= len(m.Password) + copy(dAtA[i:], m.Password) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRpc(dAtA, 
i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserChangePasswordRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.HashedPassword) > 0 { + i -= len(m.HashedPassword) + copy(dAtA[i:], m.HashedPassword) + i = encodeVarintRpc(dAtA, i, uint64(len(m.HashedPassword))) + i-- + dAtA[i] = 0x1a + } + if len(m.Password) > 0 { + i -= len(m.Password) + copy(dAtA[i:], m.Password) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserGrantRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Role) > 0 { + i -= len(m.Role) + copy(dAtA[i:], m.Role) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i-- + dAtA[i] = 0x12 + } 
+ if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintRpc(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserRevokeRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Role) > 0 { + i -= len(m.Role) + copy(dAtA[i:], m.Role) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthRoleAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthRoleGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Role) > 0 { + i -= len(m.Role) + copy(dAtA[i:], m.Role) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthRoleListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthRoleDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Role) > 0 { + i -= len(m.Role) + copy(dAtA[i:], m.Role) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthRoleGrantPermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Perm != nil { + { + size, err := m.Perm.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthRoleRevokePermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.RangeEnd) > 0 { + i -= len(m.RangeEnd) + copy(dAtA[i:], m.RangeEnd) + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i-- + dAtA[i] = 0x1a + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + } + if len(m.Role) > 0 { + i -= len(m.Role) + copy(dAtA[i:], m.Role) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthEnableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthDisableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthStatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.AuthRevision != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.AuthRevision)) + i-- + dAtA[i] = 0x18 + } + if m.Enabled { + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthenticateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Token) > 0 { + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0x12 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Roles) > 0 { + for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Roles[iNdEx]) + copy(dAtA[i:], m.Roles[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserChangePasswordResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil 
+} + +func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserGrantRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserRevokeRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthRoleAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthRoleGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Perm) > 0 { + for iNdEx := len(m.Perm) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Perm[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthRoleListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Roles) > 0 { + for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Roles[iNdEx]) + copy(dAtA[i:], m.Roles[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthUserListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Users) > 0 { + for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Users[iNdEx]) + copy(dAtA[i:], m.Users[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Users[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthRoleDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthRoleGrantPermissionResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthRoleRevokePermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { + offset -= sovRpc(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ResponseHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClusterId != 0 { + n += 1 + sovRpc(uint64(m.ClusterId)) + } + if m.MemberId != 0 { + n += 1 + sovRpc(uint64(m.MemberId)) + } + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.RaftTerm != 0 { + n += 1 + sovRpc(uint64(m.RaftTerm)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RangeRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovRpc(uint64(m.Limit)) + } + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.SortOrder != 0 { + n += 1 + sovRpc(uint64(m.SortOrder)) + } + if m.SortTarget != 0 { + n += 1 + sovRpc(uint64(m.SortTarget)) + } + if m.Serializable { + n += 2 + } + if m.KeysOnly { + n += 2 + } + if m.CountOnly { + n += 2 + } + if m.MinModRevision != 0 { + n += 1 + sovRpc(uint64(m.MinModRevision)) + } + if m.MaxModRevision != 0 { + n += 1 + sovRpc(uint64(m.MaxModRevision)) + } + if m.MinCreateRevision != 0 { + n += 1 + sovRpc(uint64(m.MinCreateRevision)) + } + if m.MaxCreateRevision != 0 { + n += 1 + sovRpc(uint64(m.MaxCreateRevision)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RangeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Kvs) > 0 { + for _, e := range m.Kvs { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.More { + n += 2 + } + if m.Count != 0 { + n += 1 + sovRpc(uint64(m.Count)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PutRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 
1 + l + sovRpc(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovRpc(uint64(m.Lease)) + } + if m.PrevKv { + n += 2 + } + if m.IgnoreValue { + n += 2 + } + if m.IgnoreLease { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PutResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.PrevKv != nil { + l = m.PrevKv.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteRangeRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.PrevKv { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteRangeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Deleted != 0 { + n += 1 + sovRpc(uint64(m.Deleted)) + } + if len(m.PrevKvs) > 0 { + for _, e := range m.PrevKvs { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RequestOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + n += m.Request.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RequestOp_RequestRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestRange != nil { + l = m.RequestRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *RequestOp_RequestPut) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestPut != nil { + l = m.RequestPut.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *RequestOp_RequestDeleteRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestDeleteRange != nil { + l = m.RequestDeleteRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *RequestOp_RequestTxn) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestTxn != nil { + l = m.RequestTxn.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Response != nil { + n += m.Response.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResponseOp_ResponseRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResponseRange != nil { + l = m.ResponseRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponsePut) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResponsePut != nil { + l = m.ResponsePut.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResponseDeleteRange != nil { + l = m.ResponseDeleteRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponseTxn) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResponseTxn != nil { + 
l = m.ResponseTxn.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *Compare) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != 0 { + n += 1 + sovRpc(uint64(m.Result)) + } + if m.Target != 0 { + n += 1 + sovRpc(uint64(m.Target)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.TargetUnion != nil { + n += m.TargetUnion.Size() + } + l = len(m.RangeEnd) + if l > 0 { + n += 2 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Compare_Version) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovRpc(uint64(m.Version)) + return n +} +func (m *Compare_CreateRevision) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovRpc(uint64(m.CreateRevision)) + return n +} +func (m *Compare_ModRevision) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovRpc(uint64(m.ModRevision)) + return n +} +func (m *Compare_Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + l = len(m.Value) + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *Compare_Lease) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovRpc(uint64(m.Lease)) + return n +} +func (m *TxnRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Compare) > 0 { + for _, e := range m.Compare { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.Success) > 0 { + for _, e := range m.Success { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.Failure) > 0 { + for _, e := range m.Failure { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TxnResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Succeeded { + n += 2 + } + if len(m.Responses) > 0 { + for _, e := range m.Responses { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CompactionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.Physical { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CompactionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HashRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HashKVRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HashKVResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Hash != 0 { + n += 1 + sovRpc(uint64(m.Hash)) + } + if m.CompactRevision != 0 { + n += 1 + sovRpc(uint64(m.CompactRevision)) + } + if 
m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HashResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Hash != 0 { + n += 1 + sovRpc(uint64(m.Hash)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.RemainingBytes != 0 { + n += 1 + sovRpc(uint64(m.RemainingBytes)) + } + l = len(m.Blob) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WatchRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestUnion != nil { + n += m.RequestUnion.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WatchRequest_CreateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CreateRequest != nil { + l = m.CreateRequest.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *WatchRequest_CancelRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CancelRequest != nil { + l = m.CancelRequest.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *WatchRequest_ProgressRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProgressRequest != nil { + l = m.ProgressRequest.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *WatchCreateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.StartRevision != 0 { + n += 1 + sovRpc(uint64(m.StartRevision)) + } + if m.ProgressNotify { + n += 2 + } + if len(m.Filters) > 0 { + l = 0 + for _, e := range m.Filters { + l += sovRpc(uint64(e)) + } + n += 1 + sovRpc(uint64(l)) + l + } + if m.PrevKv { + n += 2 + } + if m.WatchId != 0 { + n += 1 + sovRpc(uint64(m.WatchId)) + } + if m.Fragment { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WatchCancelRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.WatchId != 0 { + n += 1 + sovRpc(uint64(m.WatchId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WatchProgressRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WatchResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.WatchId != 0 { + n += 1 + sovRpc(uint64(m.WatchId)) + } + if m.Created { + n += 2 + } + if m.Canceled { + n += 2 + } + if m.CompactRevision != 0 { + n += 1 + sovRpc(uint64(m.CompactRevision)) + } + l = len(m.CancelReason) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Fragment { + n += 2 + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 
+ l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseGrantRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseGrantResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseRevokeRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseRevokeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseCheckpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.Remaining_TTL != 0 { + n += 1 + sovRpc(uint64(m.Remaining_TTL)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseCheckpointRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Checkpoints) > 0 { + for _, e := range m.Checkpoints { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseCheckpointResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseKeepAliveRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseKeepAliveResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseTimeToLiveRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.Keys { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseTimeToLiveResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + if m.GrantedTTL != 0 { + n += 1 + sovRpc(uint64(m.GrantedTTL)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + l = len(b) + n += 1 + l + 
sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseLeasesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LeaseLeasesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Leases) > 0 { + for _, e := range m.Leases { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Member) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.ClientURLs) > 0 { + for _, s := range m.ClientURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.IsLearner { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemberAddRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.IsLearner { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemberAddResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Member != nil { + l = m.Member.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemberRemoveRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemberRemoveResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemberUpdateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemberUpdateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } + return n +} + +func (m *MemberListRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Linearizable { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemberListResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemberPromoteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemberPromoteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DefragmentRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DefragmentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MoveLeaderRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TargetID != 0 { + n += 1 + sovRpc(uint64(m.TargetID)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MoveLeaderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AlarmRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != 0 { + n += 1 + sovRpc(uint64(m.Action)) + } + if m.MemberID != 0 { + n += 1 + sovRpc(uint64(m.MemberID)) + } + if m.Alarm != 0 { + n += 1 + sovRpc(uint64(m.Alarm)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AlarmMember) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MemberID != 0 { + n += 1 + sovRpc(uint64(m.MemberID)) + } + if m.Alarm != 0 { + n += 1 + sovRpc(uint64(m.Alarm)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AlarmResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Alarms) > 0 { + for _, e := range m.Alarms { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DowngradeRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != 0 { + n += 1 + sovRpc(uint64(m.Action)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + 
+func (m *DowngradeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.DbSize != 0 { + n += 1 + sovRpc(uint64(m.DbSize)) + } + if m.Leader != 0 { + n += 1 + sovRpc(uint64(m.Leader)) + } + if m.RaftIndex != 0 { + n += 1 + sovRpc(uint64(m.RaftIndex)) + } + if m.RaftTerm != 0 { + n += 1 + sovRpc(uint64(m.RaftTerm)) + } + if m.RaftAppliedIndex != 0 { + n += 1 + sovRpc(uint64(m.RaftAppliedIndex)) + } + if len(m.Errors) > 0 { + for _, s := range m.Errors { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.DbSizeInUse != 0 { + n += 1 + sovRpc(uint64(m.DbSizeInUse)) + } + if m.IsLearner { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthEnableRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthDisableRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthStatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthenticateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserAddRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.HashedPassword) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserGetRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserDeleteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserChangePasswordRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.HashedPassword) + if l > 0 { + n += 1 + l + 
sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserGrantRoleRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.User) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserRevokeRoleRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthRoleAddRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthRoleGetRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserListRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthRoleListRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthRoleDeleteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthRoleGrantPermissionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Perm != nil { + l = m.Perm.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthRoleRevokePermissionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthEnableResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthDisableResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthStatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Enabled { + n += 2 + } + if m.AuthRevision != 0 { + n += 1 + sovRpc(uint64(m.AuthRevision)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthenticateResponse) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Token) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserAddResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserGetResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserDeleteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserChangePasswordResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserGrantRoleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserRevokeRoleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthRoleAddResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthRoleGetResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Perm) > 0 { + for _, e := range m.Perm { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthRoleListResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthUserListResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Users) > 0 { + for _, s := range m.Users { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthRoleDeleteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n 
+= 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthRoleGrantPermissionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthRoleRevokePermissionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRpc(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRpc(x uint64) (n int) { + return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResponseHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + m.ClusterId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType) + } + m.MemberId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + } + m.RaftTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftTerm |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType) + } + m.SortOrder = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SortOrder |= RangeRequest_SortOrder(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType) + } + m.SortTarget = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SortTarget |= RangeRequest_SortTarget(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Serializable = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.KeysOnly = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CountOnly = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType) + } + m.MinModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinModRevision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType) + } + m.MaxModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxModRevision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType) + } + m.MinCreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinCreateRevision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType) + } + m.MaxCreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxCreateRevision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kvs = append(m.Kvs, &mvccpb.KeyValue{}) + if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field More", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.More = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreValue = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreLease = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrevKv == nil { + m.PrevKv = &mvccpb.KeyValue{} + } + if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) + } + m.Deleted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Deleted |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{}) + if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RangeRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestRange{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PutRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestPut{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DeleteRangeRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestDeleteRange{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TxnRequest{} + if err := 
v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestTxn{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RangeResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseRange{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PutResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponsePut{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DeleteRangeResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseDeleteRange{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TxnResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseTxn{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Compare) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Compare: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + m.Result = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Result |= Compare_CompareResult(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + m.Target = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Target |= Compare_CompareTarget(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_Version{v} + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_CreateRevision{v} + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_ModRevision{v} + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := make([]byte, postIndex-iNdEx) + copy(v, dAtA[iNdEx:postIndex]) + m.TargetUnion = &Compare_Value{v} + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_Lease{v} + case 64: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxnRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Compare = append(m.Compare, &Compare{}) + if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Success = append(m.Success, &RequestOp{}) + if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Failure = append(m.Failure, &RequestOp{}) + if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxnResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Succeeded = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Responses = append(m.Responses, &ResponseOp{}) + if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Physical = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashKVRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashKVResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) + } + m.CompactRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactRevision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType) + } + m.RemainingBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RemainingBytes |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blob = 
append(m.Blob[:0], dAtA[iNdEx:postIndex]...) + if m.Blob == nil { + m.Blob = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WatchCreateRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RequestUnion = &WatchRequest_CreateRequest{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WatchCancelRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RequestUnion = &WatchRequest_CancelRequest{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WatchProgressRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RequestUnion = &WatchRequest_ProgressRequest{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 
{ + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType) + } + m.StartRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartRevision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressNotify = bool(v != 0) + case 5: + if wireType == 0 { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= WatchCreateRequest_FilterType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.Filters) == 0 { + m.Filters = make([]WatchCreateRequest_FilterType, 0, elementCount) + } + for iNdEx < postIndex { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= WatchCreateRequest_FilterType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) + } + m.WatchId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WatchId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Fragment = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) + } + m.WatchId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WatchId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchProgressRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchProgressRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) + } + m.WatchId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WatchId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Created = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Canceled = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) + } + m.CompactRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactRevision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CancelReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Fragment = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &mvccpb.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCheckpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCheckpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCheckpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Remaining_TTL", wireType) + } + m.Remaining_TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Remaining_TTL |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCheckpointRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCheckpointRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checkpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Checkpoints = append(m.Checkpoints, &LeaseCheckpoint{}) + if err := m.Checkpoints[len(m.Checkpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCheckpointResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCheckpointResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Keys = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType) + } + m.GrantedTTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GrantedTTL |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) + copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Leases = append(m.Leases, &LeaseStatus{}) + if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Member) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Member: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) 
+ default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Member == nil { + m.Member = &Member{} + } + if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, 
&Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Linearizable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Linearizable = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberPromoteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberPromoteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberPromoteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberPromoteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberPromoteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberPromoteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType) + } + m.TargetID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= AlarmRequest_AlarmAction(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) + } + m.MemberID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) + } + m.Alarm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Alarm |= AlarmType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmMember) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) + } + m.MemberID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) + } + m.Alarm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Alarm |= AlarmType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Alarms = append(m.Alarms, &AlarmMember{}) + if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DowngradeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DowngradeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DowngradeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= DowngradeRequest_DowngradeAction(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DowngradeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DowngradeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DowngradeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType) + } + m.DbSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.DbSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + m.Leader = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Leader |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) + } + m.RaftIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + } + m.RaftTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftTerm |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftAppliedIndex", wireType) + } + m.RaftAppliedIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftAppliedIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Errors = append(m.Errors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DbSizeInUse", wireType) + } + m.DbSizeInUse = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DbSizeInUse |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &authpb.UserAddOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HashedPassword", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HashedPassword = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: 
+ iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HashedPassword", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HashedPassword = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Perm == nil { + m.Perm = &authpb.Permission{} + } + if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Enabled = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType) + } + m.AuthRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AuthRevision |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Perm = append(m.Perm, &authpb.Permission{}) + if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRpc(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRpc + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupRpc + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthRpc + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRpc = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto new file mode 100644 index 0000000000..4ccc234744 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto @@ -0,0 +1,1199 @@ +syntax = "proto3"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; +import "etcd/api/mvccpb/kv.proto"; +import "etcd/api/authpb/auth.proto"; + +// for grpc-gateway +import "google/api/annotations.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +service KV { + // Range gets the keys in the range from the key-value store. + rpc Range(RangeRequest) returns (RangeResponse) { + option (google.api.http) = { + post: "/v3/kv/range" + body: "*" + }; + } + + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. + rpc Put(PutRequest) returns (PutResponse) { + option (google.api.http) = { + post: "/v3/kv/put" + body: "*" + }; + } + + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) { + option (google.api.http) = { + post: "/v3/kv/deleterange" + body: "*" + }; + } + + // Txn processes multiple requests in a single transaction. + // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. 
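+ //
+ // As a sketch, a compare-and-swap style transaction with the etcd Go client
+ // (assuming "go.etcd.io/etcd/client/v3" imported as clientv3) might look like:
+ //
+ //	resp, err := cli.Txn(ctx).
+ //		If(clientv3.Compare(clientv3.CreateRevision("k"), "=", 0)).
+ //		Then(clientv3.OpPut("k", "v")).
+ //		Else(clientv3.OpGet("k")).
+ //		Commit()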
+ rpc Txn(TxnRequest) returns (TxnResponse) { + option (google.api.http) = { + post: "/v3/kv/txn" + body: "*" + }; + } + + // Compact compacts the event history in the etcd key-value store. The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. + rpc Compact(CompactionRequest) returns (CompactionResponse) { + option (google.api.http) = { + post: "/v3/kv/compaction" + body: "*" + }; + } +} + +service Watch { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + rpc Watch(stream WatchRequest) returns (stream WatchResponse) { + option (google.api.http) = { + post: "/v3/watch" + body: "*" + }; + } +} + +service Lease { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. + rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) { + option (google.api.http) = { + post: "/v3/lease/grant" + body: "*" + }; + } + + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) { + option (google.api.http) = { + post: "/v3/lease/revoke" + body: "*" + additional_bindings { + post: "/v3/kv/lease/revoke" + body: "*" + } + }; + } + + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. + rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) { + option (google.api.http) = { + post: "/v3/lease/keepalive" + body: "*" + }; + } + + // LeaseTimeToLive retrieves lease information. + rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) { + option (google.api.http) = { + post: "/v3/lease/timetolive" + body: "*" + additional_bindings { + post: "/v3/kv/lease/timetolive" + body: "*" + } + }; + } + + // LeaseLeases lists all existing leases. + rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) { + option (google.api.http) = { + post: "/v3/lease/leases" + body: "*" + additional_bindings { + post: "/v3/kv/lease/leases" + body: "*" + } + }; + } +} + +service Cluster { + // MemberAdd adds a member into the cluster. + rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/add" + body: "*" + }; + } + + // MemberRemove removes an existing member from the cluster. + rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/remove" + body: "*" + }; + } + + // MemberUpdate updates the member configuration. + rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/update" + body: "*" + }; + } + + // MemberList lists all the members in the cluster. 
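+ //
+ // Sketch with the Go client (assuming "go.etcd.io/etcd/client/v3"):
+ //
+ //	resp, err := cli.MemberList(ctx)
+ //	// resp.Members lists each member's ID, name, and peer/client URLs.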
+ rpc MemberList(MemberListRequest) returns (MemberListResponse) {
+ option (google.api.http) = {
+ post: "/v3/cluster/member/list"
+ body: "*"
+ };
+ }
+
+ // MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
+ rpc MemberPromote(MemberPromoteRequest) returns (MemberPromoteResponse) {
+ option (google.api.http) = {
+ post: "/v3/cluster/member/promote"
+ body: "*"
+ };
+ }
+}
+
+service Maintenance {
+ // Alarm activates, deactivates, and queries alarms regarding cluster health.
+ rpc Alarm(AlarmRequest) returns (AlarmResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/alarm"
+ body: "*"
+ };
+ }
+
+ // Status gets the status of the member.
+ rpc Status(StatusRequest) returns (StatusResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/status"
+ body: "*"
+ };
+ }
+
+ // Defragment defragments a member's backend database to recover storage space.
+ rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/defragment"
+ body: "*"
+ };
+ }
+
+ // Hash computes the hash of the whole backend keyspace,
+ // including key, lease, and other buckets in storage.
+ // This is designed for testing ONLY!
+ // Do not rely on this in production with ongoing transactions,
+ // since Hash operation does not hold MVCC locks.
+ // Use "HashKV" API instead for "key" bucket consistency checks.
+ rpc Hash(HashRequest) returns (HashResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/hash"
+ body: "*"
+ };
+ }
+
+ // HashKV computes the hash of all MVCC keys up to a given revision.
+ // It only iterates the "key" bucket in backend storage.
+ rpc HashKV(HashKVRequest) returns (HashKVResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/hashkv"
+ body: "*"
+ };
+ }
+
+ // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+ rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/snapshot"
+ body: "*"
+ };
+ }
+
+ // MoveLeader requests the current leader node to transfer its leadership to the transferee.
+ rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/transfer-leadership"
+ body: "*"
+ };
+ }
+
+ // Downgrade requests a downgrade, verifies the feasibility of a downgrade,
+ // or cancels a downgrade on the cluster version.
+ // Supported since etcd 3.5.
+ rpc Downgrade(DowngradeRequest) returns (DowngradeResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/downgrade"
+ body: "*"
+ };
+ }
+}
+
+service Auth {
+ // AuthEnable enables authentication.
+ rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/enable"
+ body: "*"
+ };
+ }
+
+ // AuthDisable disables authentication.
+ rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/disable"
+ body: "*"
+ };
+ }
+
+ // AuthStatus displays authentication status.
+ rpc AuthStatus(AuthStatusRequest) returns (AuthStatusResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/status"
+ body: "*"
+ };
+ }
+
+ // Authenticate processes an authenticate request.
+ rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/authenticate"
+ body: "*"
+ };
+ }
+
+ // UserAdd adds a new user. User name cannot be empty.
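+ //
+ // Sketch with the Go client (assuming "go.etcd.io/etcd/client/v3"):
+ //
+ //	if _, err := cli.UserAdd(ctx, "alice", "s3cret"); err != nil {
+ //		// handle error
+ //	}
+ //	// A role can then be granted with cli.UserGrantRole(ctx, "alice", "reader").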
+ rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/add"
+ body: "*"
+ };
+ }
+
+ // UserGet gets detailed user information.
+ rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/get"
+ body: "*"
+ };
+ }
+
+ // UserList gets a list of all users.
+ rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/list"
+ body: "*"
+ };
+ }
+
+ // UserDelete deletes a specified user.
+ rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/delete"
+ body: "*"
+ };
+ }
+
+ // UserChangePassword changes the password of a specified user.
+ rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/changepw"
+ body: "*"
+ };
+ }
+
+ // UserGrantRole grants a role to a specified user.
+ rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/grant"
+ body: "*"
+ };
+ }
+
+ // UserRevokeRole revokes a role of a specified user.
+ rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/revoke"
+ body: "*"
+ };
+ }
+
+ // RoleAdd adds a new role. Role name cannot be empty.
+ rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/add"
+ body: "*"
+ };
+ }
+
+ // RoleGet gets detailed role information.
+ rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/get"
+ body: "*"
+ };
+ }
+
+ // RoleList gets a list of all roles.
+ rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/list"
+ body: "*"
+ };
+ }
+
+ // RoleDelete deletes a specified role.
+ rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/delete"
+ body: "*"
+ };
+ }
+
+ // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+ rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/grant"
+ body: "*"
+ };
+ }
+
+ // RoleRevokePermission revokes a key or range permission of a specified role.
+ rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/revoke"
+ body: "*"
+ };
+ }
+}
+
+message ResponseHeader {
+ // cluster_id is the ID of the cluster which sent the response.
+ uint64 cluster_id = 1;
+ // member_id is the ID of the member which sent the response.
+ uint64 member_id = 2;
+ // revision is the key-value store revision when the request was applied.
+ // For watch progress responses, the header.revision indicates progress. All future events
+ // received in this stream are guaranteed to have a higher revision number than the
+ // header.revision number.
+ int64 revision = 3;
+ // raft_term is the raft term when the request was applied.
+ uint64 raft_term = 4;
+}
+
+message RangeRequest {
+ enum SortOrder {
+ NONE = 0; // default, no sorting
+ ASCEND = 1; // lowest target value first
+ DESCEND = 2; // highest target value first
+ }
+ enum SortTarget {
+ KEY = 0;
+ VERSION = 1;
+ CREATE = 2;
+ MOD = 3;
+ VALUE = 4;
+ }
+
+ // key is the first key for the range. If range_end is not given, the request only looks up key.
+ bytes key = 1;
+ // range_end is the upper bound on the requested range [key, range_end).
+ // If range_end is '\0', the range is all keys >= key.
+ // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+ // then the range request gets all keys prefixed with key.
+ // If both key and range_end are '\0', then the range request returns all keys.
+ bytes range_end = 2;
+ // limit is a limit on the number of keys returned for the request. When limit is set to 0,
+ // it is treated as no limit.
+ int64 limit = 3;
+ // revision is the point-in-time of the key-value store to use for the range.
+ // If revision is less than or equal to zero, the range is over the newest key-value store.
+ // If the revision has been compacted, ErrCompacted is returned as a response.
+ int64 revision = 4;
+
+ // sort_order is the order for returned sorted results.
+ SortOrder sort_order = 5;
+
+ // sort_target is the key-value field to use for sorting.
+ SortTarget sort_target = 6;
+
+ // serializable sets the range request to use serializable member-local reads.
+ // Range requests are linearizable by default; linearizable requests have higher
+ // latency and lower throughput than serializable requests but reflect the current
+ // consensus of the cluster. For better performance, in exchange for possible stale reads,
+ // a serializable range request is served locally without needing to reach consensus
+ // with other nodes in the cluster.
+ bool serializable = 7;
+
+ // keys_only when set returns only the keys and not the values.
+ bool keys_only = 8;
+
+ // count_only when set returns only the count of the keys in the range.
+ bool count_only = 9;
+
+ // min_mod_revision is the lower bound for returned key mod revisions; all keys with
+ // lesser mod revisions will be filtered away.
+ int64 min_mod_revision = 10;
+
+ // max_mod_revision is the upper bound for returned key mod revisions; all keys with
+ // greater mod revisions will be filtered away.
+ int64 max_mod_revision = 11;
+
+ // min_create_revision is the lower bound for returned key create revisions; all keys with
+ // lesser create revisions will be filtered away.
+ int64 min_create_revision = 12;
+
+ // max_create_revision is the upper bound for returned key create revisions; all keys with
+ // greater create revisions will be filtered away.
+ int64 max_create_revision = 13;
+}
+
+message RangeResponse {
+ ResponseHeader header = 1;
+ // kvs is the list of key-value pairs matched by the range request.
+ // kvs is empty when count is requested.
+ repeated mvccpb.KeyValue kvs = 2;
+ // more indicates if there are more keys to return in the requested range.
+ bool more = 3;
+ // count is set to the number of keys within the range when requested.
+ int64 count = 4;
+}
+
+message PutRequest {
+ // key is the key, in bytes, to put into the key-value store.
+ bytes key = 1;
+ // value is the value, in bytes, to associate with the key in the key-value store.
+ bytes value = 2;
+ // lease is the lease ID to associate with the key in the key-value store. A lease
+ // value of 0 indicates no lease.
+ int64 lease = 3;
+
+ // If prev_kv is set, etcd gets the previous key-value pair before changing it.
+ // The previous key-value pair will be returned in the put response.
+ bool prev_kv = 4;
+
+ // If ignore_value is set, etcd updates the key using its current value.
+ // Returns an error if the key does not exist.
+ bool ignore_value = 5;
+
+ // If ignore_lease is set, etcd updates the key using its current lease.
+ // Returns an error if the key does not exist.
+ bool ignore_lease = 6;
+}
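+
+// Sketch of a put that attaches a lease and requests the previous key-value
+// pair, with the Go client (assuming "go.etcd.io/etcd/client/v3"):
+//
+//	lease, err := cli.Grant(ctx, 10) // 10-second TTL
+//	if err != nil { /* handle error */ }
+//	resp, err := cli.Put(ctx, "k", "v",
+//		clientv3.WithLease(lease.ID), clientv3.WithPrevKV())
+//	// resp.PrevKv is populated because WithPrevKV() was requested.
+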
+message PutResponse {
+ ResponseHeader header = 1;
+ // if prev_kv is set in the request, the previous key-value pair will be returned.
+ mvccpb.KeyValue prev_kv = 2;
+}
+
+message DeleteRangeRequest {
+ // key is the first key to delete in the range.
+ bytes key = 1;
+ // range_end is the key following the last key to delete for the range [key, range_end).
+ // If range_end is not given, the range is defined to contain only the key argument.
+ // If range_end is one bit larger than the given key, then the range is all the keys
+ // with the prefix (the given key).
+ // If range_end is '\0', the range is all keys greater than or equal to the key argument.
+ bytes range_end = 2;
+
+ // If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
+ // The previous key-value pairs will be returned in the delete response.
+ bool prev_kv = 3;
+}
+
+message DeleteRangeResponse {
+ ResponseHeader header = 1;
+ // deleted is the number of keys deleted by the delete range request.
+ int64 deleted = 2;
+ // if prev_kv is set in the request, the previous key-value pairs will be returned.
+ repeated mvccpb.KeyValue prev_kvs = 3;
+}
+
+message RequestOp {
+ // request is a union of request types accepted by a transaction.
+ oneof request {
+ RangeRequest request_range = 1;
+ PutRequest request_put = 2;
+ DeleteRangeRequest request_delete_range = 3;
+ TxnRequest request_txn = 4;
+ }
+}
+
+message ResponseOp {
+ // response is a union of response types returned by a transaction.
+ oneof response {
+ RangeResponse response_range = 1;
+ PutResponse response_put = 2;
+ DeleteRangeResponse response_delete_range = 3;
+ TxnResponse response_txn = 4;
+ }
+}
+
+message Compare {
+ enum CompareResult {
+ EQUAL = 0;
+ GREATER = 1;
+ LESS = 2;
+ NOT_EQUAL = 3;
+ }
+ enum CompareTarget {
+ VERSION = 0;
+ CREATE = 1;
+ MOD = 2;
+ VALUE = 3;
+ LEASE = 4;
+ }
+ // result is the logical comparison operation for this comparison.
+ CompareResult result = 1;
+ // target is the key-value field to inspect for the comparison.
+ CompareTarget target = 2;
+ // key is the subject key for the comparison operation.
+ bytes key = 3;
+ oneof target_union {
+ // version is the version of the given key
+ int64 version = 4;
+ // create_revision is the creation revision of the given key
+ int64 create_revision = 5;
+ // mod_revision is the last modified revision of the given key.
+ int64 mod_revision = 6;
+ // value is the value of the given key, in bytes.
+ bytes value = 7;
+ // lease is the lease id of the given key.
+ int64 lease = 8;
+ // leave room for more target_union field tags, jump to 64
+ }
+
+ // range_end compares the given target to all keys in the range [key, range_end).
+ // See RangeRequest for more details on key ranges.
+ bytes range_end = 64;
+ // TODO: fill out with most of the rest of RangeRequest fields when needed.
+}
+
+// From google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
+// and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
+// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
+// may apply to the same or different entries in the database. All tests in the guard are applied and
+// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
+// it executes f op (see item 3 below).
+// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed if guard evaluates to true.
+// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
+message TxnRequest {
+ // compare is a list of predicates representing a conjunction of terms.
+ // If the comparisons succeed, then the success requests will be processed in order,
+ // and the response will contain their respective responses in order.
+ // If the comparisons fail, then the failure requests will be processed in order,
+ // and the response will contain their respective responses in order.
+ repeated Compare compare = 1;
+ // success is a list of requests which will be applied when compare evaluates to true.
+ repeated RequestOp success = 2;
+ // failure is a list of requests which will be applied when compare evaluates to false.
+ repeated RequestOp failure = 3;
+}
+
+message TxnResponse {
+ ResponseHeader header = 1;
+ // succeeded is set to true if the compare evaluated to true, and false otherwise.
+ bool succeeded = 2;
+ // responses is a list of responses corresponding to the results from applying
+ // success if succeeded is true or failure if succeeded is false.
+ repeated ResponseOp responses = 3;
+}
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+message CompactionRequest {
+ // revision is the key-value store revision for the compaction operation.
+ int64 revision = 1;
+ // physical is set so the RPC will wait until the compaction is physically
+ // applied to the local database such that compacted entries are totally
+ // removed from the backend database.
+ bool physical = 2;
+}
+
+message CompactionResponse {
+ ResponseHeader header = 1;
+}
+
+message HashRequest {
+}
+
+message HashKVRequest {
+ // revision is the key-value store revision for the hash operation.
+ int64 revision = 1;
+}
+
+message HashKVResponse {
+ ResponseHeader header = 1;
+ // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+ uint32 hash = 2;
+ // compact_revision is the compacted revision of key-value store when hash begins.
+ int64 compact_revision = 3;
+}
+
+message HashResponse {
+ ResponseHeader header = 1;
+ // hash is the hash value computed from the responding member's KV's backend.
+ uint32 hash = 2;
+}
+
+message SnapshotRequest {
+}
+
+message SnapshotResponse {
+ // header has the current key-value store information. The first header in the snapshot
+ // stream indicates the point in time of the snapshot.
+ ResponseHeader header = 1;
+
+ // remaining_bytes is the number of blob bytes to be sent after this message.
+ uint64 remaining_bytes = 2;
+
+ // blob contains the next chunk of the snapshot in the snapshot stream.
+ bytes blob = 3;
+}
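+
+// Sketch of a prefix watch with the Go client (assuming
+// "go.etcd.io/etcd/client/v3"):
+//
+//	rch := cli.Watch(ctx, "foo/", clientv3.WithPrefix())
+//	for wresp := range rch {
+//		for _, ev := range wresp.Events {
+//			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
+//		}
+//	}
+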
+message WatchRequest {
+ // request_union is a request to either create a new watcher or cancel an existing watcher.
+ oneof request_union {
+ WatchCreateRequest create_request = 1;
+ WatchCancelRequest cancel_request = 2;
+ WatchProgressRequest progress_request = 3;
+ }
+}
+
+message WatchCreateRequest {
+ // key is the key to register for watching.
+ bytes key = 1;
+
+ // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+ // only the key argument is watched. If range_end is equal to '\0', all keys greater than
+ // or equal to the key argument are watched.
+ // If the range_end is one bit larger than the given key,
+ // then all keys with the prefix (the given key) will be watched.
+ bytes range_end = 2;
+
+ // start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
+ int64 start_revision = 3;
+
+ // progress_notify is set so that the etcd server will periodically send a WatchResponse with
+ // no events to the new watcher if there are no recent events. It is useful when clients
+ // wish to recover a disconnected watcher starting from a recent known revision.
+ // The etcd server may decide how often it will send notifications based on current load.
+ bool progress_notify = 4;
+
+ enum FilterType {
+ // filter out put event.
+ NOPUT = 0;
+ // filter out delete event.
+ NODELETE = 1;
+ }
+
+ // filters filter the events on the server side before they are sent back to the watcher.
+ repeated FilterType filters = 5;
+
+ // If prev_kv is set, created watcher gets the previous KV before the event happens.
+ // If the previous KV is already compacted, nothing will be returned.
+ bool prev_kv = 6;
+
+ // If watch_id is provided and non-zero, it will be assigned to this watcher.
+ // Since creating a watcher in etcd is not a synchronous operation,
+ // this can be used to ensure that ordering is correct when creating multiple
+ // watchers on the same stream. Creating a watcher with an ID already in
+ // use on the stream will cause an error to be returned.
+ int64 watch_id = 7;
+
+ // fragment enables splitting large revisions into multiple watch responses.
+ bool fragment = 8;
+}
+
+message WatchCancelRequest {
+ // watch_id is the watcher id to cancel so that no more events are transmitted.
+ int64 watch_id = 1;
+}
+
+// Requests that a watch stream progress status be sent in the watch response stream as soon as
+// possible.
+message WatchProgressRequest {
+}
+
+message WatchResponse {
+ ResponseHeader header = 1;
+ // watch_id is the ID of the watcher that corresponds to the response.
+ int64 watch_id = 2;
+
+ // created is set to true if the response is for a create watch request.
+ // The client should record the watch_id and expect to receive events for
+ // the created watcher from the same stream.
+ // All events sent to the created watcher will attach with the same watch_id.
+ bool created = 3;
+
+ // canceled is set to true if the response is for a cancel watch request.
+ // No further events will be sent to the canceled watcher.
+ bool canceled = 4;
+
+ // compact_revision is set to the minimum index if a watcher tries to watch
+ // at a compacted index.
+ //
+ // This happens when creating a watcher at a compacted revision or the watcher cannot
+ // catch up with the progress of the key-value store.
+ //
+ // The client should treat the watcher as canceled and should not try to create any
+ // watcher with the same start_revision again.
+ int64 compact_revision = 5;
+
+ // cancel_reason indicates the reason for canceling the watcher.
+ string cancel_reason = 6;
+
+ // fragment is true if a large watch response was split over multiple responses.
+ bool fragment = 7;
+
+ repeated mvccpb.Event events = 11;
+}
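+
+// Sketch of granting a lease and keeping it alive with the Go client
+// (assuming "go.etcd.io/etcd/client/v3"):
+//
+//	lease, err := cli.Grant(ctx, 5) // 5-second TTL
+//	if err != nil { /* handle error */ }
+//	ch, err := cli.KeepAlive(ctx, lease.ID)
+//	if err != nil { /* handle error */ }
+//	// Each message on ch is a LeaseKeepAliveResponse with the refreshed TTL.
+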
+message LeaseGrantRequest {
+ // TTL is the advisory time-to-live in seconds. Expired lease will return -1.
+ int64 TTL = 1;
+ // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+ int64 ID = 2;
+}
+
+message LeaseGrantResponse {
+ ResponseHeader header = 1;
+ // ID is the lease ID for the granted lease.
+ int64 ID = 2;
+ // TTL is the server chosen lease time-to-live in seconds.
+ int64 TTL = 3;
+ string error = 4;
+}
+
+message LeaseRevokeRequest {
+ // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
+ int64 ID = 1;
+}
+
+message LeaseRevokeResponse {
+ ResponseHeader header = 1;
+}
+
+message LeaseCheckpoint {
+ // ID is the lease ID to checkpoint.
+ int64 ID = 1;
+
+ // Remaining_TTL is the remaining time until expiry of the lease.
+ int64 remaining_TTL = 2;
+}
+
+message LeaseCheckpointRequest {
+ repeated LeaseCheckpoint checkpoints = 1;
+}
+
+message LeaseCheckpointResponse {
+ ResponseHeader header = 1;
+}
+
+message LeaseKeepAliveRequest {
+ // ID is the lease ID for the lease to keep alive.
+ int64 ID = 1;
+}
+
+message LeaseKeepAliveResponse {
+ ResponseHeader header = 1;
+ // ID is the lease ID from the keep alive request.
+ int64 ID = 2;
+ // TTL is the new time-to-live for the lease.
+ int64 TTL = 3;
+}
+
+message LeaseTimeToLiveRequest {
+ // ID is the lease ID for the lease.
+ int64 ID = 1;
+ // keys is true to query all the keys attached to this lease.
+ bool keys = 2;
+}
+
+message LeaseTimeToLiveResponse {
+ ResponseHeader header = 1;
+ // ID is the lease ID from the keep alive request.
+ int64 ID = 2;
+ // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+ int64 TTL = 3;
+ // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+ int64 grantedTTL = 4;
+ // Keys is the list of keys attached to this lease.
+ repeated bytes keys = 5;
+}
+
+message LeaseLeasesRequest {
+}
+
+message LeaseStatus {
+ int64 ID = 1;
+ // TODO: int64 TTL = 2;
+}
+
+message LeaseLeasesResponse {
+ ResponseHeader header = 1;
+ repeated LeaseStatus leases = 2;
+}
+
+message Member {
+ // ID is the member ID for this member.
+ uint64 ID = 1;
+ // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+ string name = 2;
+ // peerURLs is the list of URLs the member exposes to the cluster for communication.
+ repeated string peerURLs = 3;
+ // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+ repeated string clientURLs = 4;
+ // isLearner indicates if the member is raft learner.
+ bool isLearner = 5;
+}
+
+message MemberAddRequest {
+ // peerURLs is the list of URLs the added member will use to communicate with the cluster.
+ repeated string peerURLs = 1;
+ // isLearner indicates if the added member is raft learner.
+ bool isLearner = 2;
+}
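+
+// Sketch of adding a member with the Go client (assuming
+// "go.etcd.io/etcd/client/v3"):
+//
+//	resp, err := cli.MemberAdd(ctx, []string{"http://10.0.0.5:2380"})
+//	// To add the member as a non-voting learner first, the client also
+//	// offers MemberAddAsLearner.
+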
+message MemberAddResponse {
+ ResponseHeader header = 1;
+ // member is the member information for the added member.
+ Member member = 2;
+ // members is a list of all members after adding the new member.
+ repeated Member members = 3;
+}
+
+message MemberRemoveRequest {
+ // ID is the member ID of the member to remove.
+ uint64 ID = 1;
+}
+
+message MemberRemoveResponse {
+ ResponseHeader header = 1;
+ // members is a list of all members after removing the member.
+ repeated Member members = 2;
+}
+
+message MemberUpdateRequest {
+ // ID is the member ID of the member to update.
+ uint64 ID = 1;
+ // peerURLs is the new list of URLs the member will use to communicate with the cluster.
+ repeated string peerURLs = 2;
+}
+
+message MemberUpdateResponse {
+ ResponseHeader header = 1;
+ // members is a list of all members after updating the member.
+ repeated Member members = 2;
+}
+
+message MemberListRequest {
+ bool linearizable = 1;
+}
+
+message MemberListResponse {
+ ResponseHeader header = 1;
+ // members is a list of all members associated with the cluster.
+ repeated Member members = 2;
+}
+
+message MemberPromoteRequest {
+ // ID is the member ID of the member to promote.
+ uint64 ID = 1;
+}
+
+message MemberPromoteResponse {
+ ResponseHeader header = 1;
+ // members is a list of all members after promoting the member.
+ repeated Member members = 2;
+}
+
+message DefragmentRequest {
+}
+
+message DefragmentResponse {
+ ResponseHeader header = 1;
+}
+
+message MoveLeaderRequest {
+ // targetID is the node ID for the new leader.
+ uint64 targetID = 1;
+}
+
+message MoveLeaderResponse {
+ ResponseHeader header = 1;
+}
+
+enum AlarmType {
+ NONE = 0; // default, used to query if any alarm is active
+ NOSPACE = 1; // space quota is exhausted
+ CORRUPT = 2; // kv store corruption detected
+}
+
+message AlarmRequest {
+ enum AlarmAction {
+ GET = 0;
+ ACTIVATE = 1;
+ DEACTIVATE = 2;
+ }
+ // action is the kind of alarm request to issue. The action
+ // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+ // raised alarm.
+ AlarmAction action = 1;
+ // memberID is the ID of the member associated with the alarm. If memberID is 0, the
+ // alarm request covers all members.
+ uint64 memberID = 2;
+ // alarm is the type of alarm to consider for this request.
+ AlarmType alarm = 3;
+}
+
+message AlarmMember {
+ // memberID is the ID of the member associated with the raised alarm.
+ uint64 memberID = 1;
+ // alarm is the type of alarm which has been raised.
+ AlarmType alarm = 2;
+}
+
+message AlarmResponse {
+ ResponseHeader header = 1;
+ // alarms is a list of alarms associated with the alarm request.
+ repeated AlarmMember alarms = 2;
+}
+
+message DowngradeRequest {
+ enum DowngradeAction {
+ VALIDATE = 0;
+ ENABLE = 1;
+ CANCEL = 2;
+ }
+
+ // action is the kind of downgrade request to issue. The action may
+ // VALIDATE the target version, ENABLE downgrading to the target version,
+ // or CANCEL the current downgrading job.
+ DowngradeAction action = 1;
+ // version is the target version to downgrade.
+ string version = 2;
+}
+
+message DowngradeResponse {
+ ResponseHeader header = 1;
+ // version is the current cluster version.
+ string version = 2;
+}
+
+message StatusRequest {
+}
+
+message StatusResponse {
+ ResponseHeader header = 1;
+ // version is the cluster protocol version used by the responding member.
+ string version = 2;
+ // dbSize is the size of the backend database physically allocated, in bytes, of the responding member.
+ int64 dbSize = 3; + // leader is the member ID which the responding member believes is the current leader. + uint64 leader = 4; + // raftIndex is the current raft committed index of the responding member. + uint64 raftIndex = 5; + // raftTerm is the current raft term of the responding member. + uint64 raftTerm = 6; + // raftAppliedIndex is the current raft applied index of the responding member. + uint64 raftAppliedIndex = 7; + // errors contains alarm/health information and status. + repeated string errors = 8; + // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. + int64 dbSizeInUse = 9; + // isLearner indicates if the member is raft learner. + bool isLearner = 10; +} + +message AuthEnableRequest { +} + +message AuthDisableRequest { +} + +message AuthStatusRequest { +} + +message AuthenticateRequest { + string name = 1; + string password = 2; +} + +message AuthUserAddRequest { + string name = 1; + string password = 2; + authpb.UserAddOptions options = 3; + string hashedPassword = 4; +} + +message AuthUserGetRequest { + string name = 1; +} + +message AuthUserDeleteRequest { + // name is the name of the user to delete. + string name = 1; +} + +message AuthUserChangePasswordRequest { + // name is the name of the user whose password is being changed. + string name = 1; + // password is the new password for the user. Note that this field will be removed in the API layer. + string password = 2; + // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer. + string hashedPassword = 3; +} + +message AuthUserGrantRoleRequest { + // user is the name of the user which should be granted a given role. + string user = 1; + // role is the name of the role to grant to the user. + string role = 2; +} + +message AuthUserRevokeRoleRequest { + string name = 1; + string role = 2; +} + +message AuthRoleAddRequest { + // name is the name of the role to add to the authentication system. + string name = 1; +} + +message AuthRoleGetRequest { + string role = 1; +} + +message AuthUserListRequest { +} + +message AuthRoleListRequest { +} + +message AuthRoleDeleteRequest { + string role = 1; +} + +message AuthRoleGrantPermissionRequest { + // name is the name of the role which will be granted the permission. + string name = 1; + // perm is the permission to grant to the role. 
+ authpb.Permission perm = 2; +} + +message AuthRoleRevokePermissionRequest { + string role = 1; + bytes key = 2; + bytes range_end = 3; +} + +message AuthEnableResponse { + ResponseHeader header = 1; +} + +message AuthDisableResponse { + ResponseHeader header = 1; +} + +message AuthStatusResponse { + ResponseHeader header = 1; + bool enabled = 2; + // authRevision is the current revision of auth store + uint64 authRevision = 3; +} + +message AuthenticateResponse { + ResponseHeader header = 1; + // token is an authorized token that can be used in succeeding RPCs + string token = 2; +} + +message AuthUserAddResponse { + ResponseHeader header = 1; +} + +message AuthUserGetResponse { + ResponseHeader header = 1; + + repeated string roles = 2; +} + +message AuthUserDeleteResponse { + ResponseHeader header = 1; +} + +message AuthUserChangePasswordResponse { + ResponseHeader header = 1; +} + +message AuthUserGrantRoleResponse { + ResponseHeader header = 1; +} + +message AuthUserRevokeRoleResponse { + ResponseHeader header = 1; +} + +message AuthRoleAddResponse { + ResponseHeader header = 1; +} + +message AuthRoleGetResponse { + ResponseHeader header = 1; + + repeated authpb.Permission perm = 2; +} + +message AuthRoleListResponse { + ResponseHeader header = 1; + + repeated string roles = 2; +} + +message AuthUserListResponse { + ResponseHeader header = 1; + + repeated string users = 2; +} + +message AuthRoleDeleteResponse { + ResponseHeader header = 1; +} + +message AuthRoleGrantPermissionResponse { + ResponseHeader header = 1; +} + +message AuthRoleRevokePermissionResponse { + ResponseHeader header = 1; +} diff --git a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go new file mode 100644 index 0000000000..cf0d428180 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go @@ -0,0 +1,1454 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: membership.proto + +package membershippb + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// RaftAttributes represents the raft related attributes of an etcd member. +type RaftAttributes struct { + // peerURLs is the list of peers in the raft cluster. + PeerUrls []string `protobuf:"bytes,1,rep,name=peer_urls,json=peerUrls,proto3" json:"peer_urls,omitempty"` + // isLearner indicates if the member is raft learner. 
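+ //
+ // In the protobuf struct tag below, "varint,2,opt" means field number 2
+ // encoded with wire type 0 (varint); the key byte on the wire is
+ // (2<<3)|0 = 0x10, the literal written by MarshalToSizedBuffer further
+ // down. Similarly, peer_urls is field 1 with wire type 2
+ // (length-delimited), so its key byte is (1<<3)|2 = 0x0a.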
+ IsLearner bool `protobuf:"varint,2,opt,name=is_learner,json=isLearner,proto3" json:"is_learner,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RaftAttributes) Reset() { *m = RaftAttributes{} } +func (m *RaftAttributes) String() string { return proto.CompactTextString(m) } +func (*RaftAttributes) ProtoMessage() {} +func (*RaftAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_949fe0d019050ef5, []int{0} +} +func (m *RaftAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RaftAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RaftAttributes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RaftAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_RaftAttributes.Merge(m, src) +} +func (m *RaftAttributes) XXX_Size() int { + return m.Size() +} +func (m *RaftAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_RaftAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_RaftAttributes proto.InternalMessageInfo + +// Attributes represents all the non-raft related attributes of an etcd member. +type Attributes struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ClientUrls []string `protobuf:"bytes,2,rep,name=client_urls,json=clientUrls,proto3" json:"client_urls,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Attributes) Reset() { *m = Attributes{} } +func (m *Attributes) String() string { return proto.CompactTextString(m) } +func (*Attributes) ProtoMessage() {} +func (*Attributes) Descriptor() ([]byte, []int) { + return fileDescriptor_949fe0d019050ef5, []int{1} +} +func (m *Attributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Attributes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Attributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_Attributes.Merge(m, src) +} +func (m *Attributes) XXX_Size() int { + return m.Size() +} +func (m *Attributes) XXX_DiscardUnknown() { + xxx_messageInfo_Attributes.DiscardUnknown(m) +} + +var xxx_messageInfo_Attributes proto.InternalMessageInfo + +type Member struct { + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + RaftAttributes *RaftAttributes `protobuf:"bytes,2,opt,name=raft_attributes,json=raftAttributes,proto3" json:"raft_attributes,omitempty"` + MemberAttributes *Attributes `protobuf:"bytes,3,opt,name=member_attributes,json=memberAttributes,proto3" json:"member_attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { + return fileDescriptor_949fe0d019050ef5, []int{2} +} +func (m *Member) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if 
deterministic { + return xxx_messageInfo_Member.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Member) XXX_Merge(src proto.Message) { + xxx_messageInfo_Member.Merge(m, src) +} +func (m *Member) XXX_Size() int { + return m.Size() +} +func (m *Member) XXX_DiscardUnknown() { + xxx_messageInfo_Member.DiscardUnknown(m) +} + +var xxx_messageInfo_Member proto.InternalMessageInfo + +type ClusterVersionSetRequest struct { + Ver string `protobuf:"bytes,1,opt,name=ver,proto3" json:"ver,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterVersionSetRequest) Reset() { *m = ClusterVersionSetRequest{} } +func (m *ClusterVersionSetRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterVersionSetRequest) ProtoMessage() {} +func (*ClusterVersionSetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_949fe0d019050ef5, []int{3} +} +func (m *ClusterVersionSetRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterVersionSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClusterVersionSetRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClusterVersionSetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterVersionSetRequest.Merge(m, src) +} +func (m *ClusterVersionSetRequest) XXX_Size() int { + return m.Size() +} +func (m *ClusterVersionSetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterVersionSetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterVersionSetRequest proto.InternalMessageInfo + +type ClusterMemberAttrSetRequest struct { + Member_ID uint64 `protobuf:"varint,1,opt,name=member_ID,json=memberID,proto3" json:"member_ID,omitempty"` + MemberAttributes *Attributes `protobuf:"bytes,2,opt,name=member_attributes,json=memberAttributes,proto3" json:"member_attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterMemberAttrSetRequest) Reset() { *m = ClusterMemberAttrSetRequest{} } +func (m *ClusterMemberAttrSetRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterMemberAttrSetRequest) ProtoMessage() {} +func (*ClusterMemberAttrSetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_949fe0d019050ef5, []int{4} +} +func (m *ClusterMemberAttrSetRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterMemberAttrSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClusterMemberAttrSetRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClusterMemberAttrSetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterMemberAttrSetRequest.Merge(m, src) +} +func (m *ClusterMemberAttrSetRequest) XXX_Size() int { + return m.Size() +} +func (m *ClusterMemberAttrSetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterMemberAttrSetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterMemberAttrSetRequest proto.InternalMessageInfo + +type DowngradeInfoSetRequest 
struct { + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + Ver string `protobuf:"bytes,2,opt,name=ver,proto3" json:"ver,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DowngradeInfoSetRequest) Reset() { *m = DowngradeInfoSetRequest{} } +func (m *DowngradeInfoSetRequest) String() string { return proto.CompactTextString(m) } +func (*DowngradeInfoSetRequest) ProtoMessage() {} +func (*DowngradeInfoSetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_949fe0d019050ef5, []int{5} +} +func (m *DowngradeInfoSetRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DowngradeInfoSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DowngradeInfoSetRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DowngradeInfoSetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DowngradeInfoSetRequest.Merge(m, src) +} +func (m *DowngradeInfoSetRequest) XXX_Size() int { + return m.Size() +} +func (m *DowngradeInfoSetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DowngradeInfoSetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DowngradeInfoSetRequest proto.InternalMessageInfo + +func init() { + proto.RegisterType((*RaftAttributes)(nil), "membershippb.RaftAttributes") + proto.RegisterType((*Attributes)(nil), "membershippb.Attributes") + proto.RegisterType((*Member)(nil), "membershippb.Member") + proto.RegisterType((*ClusterVersionSetRequest)(nil), "membershippb.ClusterVersionSetRequest") + proto.RegisterType((*ClusterMemberAttrSetRequest)(nil), "membershippb.ClusterMemberAttrSetRequest") + proto.RegisterType((*DowngradeInfoSetRequest)(nil), "membershippb.DowngradeInfoSetRequest") +} + +func init() { proto.RegisterFile("membership.proto", fileDescriptor_949fe0d019050ef5) } + +var fileDescriptor_949fe0d019050ef5 = []byte{ + // 367 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x4e, 0xf2, 0x40, + 0x14, 0x85, 0x99, 0x42, 0xf8, 0xdb, 0xcb, 0x1f, 0xc4, 0x09, 0x89, 0x8d, 0x68, 0x25, 0x5d, 0xb1, + 0x30, 0x98, 0xe8, 0x13, 0xa0, 0xb0, 0x20, 0x81, 0xcd, 0x18, 0xdd, 0x92, 0x56, 0x2e, 0xd8, 0xa4, + 0x74, 0xea, 0xcc, 0x54, 0xd7, 0xbe, 0x85, 0x4f, 0xe0, 0xb3, 0xb0, 0xf4, 0x11, 0x14, 0x5f, 0xc4, + 0x74, 0x5a, 0x4a, 0x49, 0xdc, 0xb8, 0xbb, 0x3d, 0xbd, 0xf7, 0x9c, 0xf3, 0x35, 0x85, 0xd6, 0x0a, + 0x57, 0x3e, 0x0a, 0xf9, 0x18, 0xc4, 0xfd, 0x58, 0x70, 0xc5, 0xe9, 0xff, 0x9d, 0x12, 0xfb, 0xc7, + 0xed, 0x25, 0x5f, 0x72, 0xfd, 0xe2, 0x22, 0x9d, 0xb2, 0x1d, 0x77, 0x02, 0x4d, 0xe6, 0x2d, 0xd4, + 0x40, 0x29, 0x11, 0xf8, 0x89, 0x42, 0x49, 0x3b, 0x60, 0xc5, 0x88, 0x62, 0x96, 0x88, 0x50, 0xda, + 0xa4, 0x5b, 0xed, 0x59, 0xcc, 0x4c, 0x85, 0x3b, 0x11, 0x4a, 0x7a, 0x0a, 0x10, 0xc8, 0x59, 0x88, + 0x9e, 0x88, 0x50, 0xd8, 0x46, 0x97, 0xf4, 0x4c, 0x66, 0x05, 0x72, 0x92, 0x09, 0xee, 0x00, 0xa0, + 0xe4, 0x44, 0xa1, 0x16, 0x79, 0x2b, 0xb4, 0x49, 0x97, 0xf4, 0x2c, 0xa6, 0x67, 0x7a, 0x06, 0x8d, + 0x87, 0x30, 0xc0, 0x48, 0x65, 0xfe, 0x86, 0xf6, 0x87, 0x4c, 0x4a, 0x13, 0xdc, 0x77, 0x02, 0xf5, + 0xa9, 0xee, 0x4d, 0x9b, 0x60, 0x8c, 0x87, 0xfa, 0xba, 0xc6, 0x8c, 0xf1, 0x90, 0x8e, 0xe0, 0x40, + 0x78, 0x0b, 0x35, 0xf3, 0x8a, 0x08, 0xdd, 0xa0, 0x71, 0x79, 0xd2, 0x2f, 0x93, 0xf6, 0xf7, 0x81, + 0x58, 0x53, 0xec, 0x03, 0x8e, 0xe0, 
0x30, 0x5b, 0x2f, 0x1b, 0x55, 0xb5, 0x91, 0xbd, 0x6f, 0x54, + 0x32, 0xc9, 0xbf, 0xee, 0x4e, 0x71, 0xcf, 0xc1, 0xbe, 0x09, 0x13, 0xa9, 0x50, 0xdc, 0xa3, 0x90, + 0x01, 0x8f, 0x6e, 0x51, 0x31, 0x7c, 0x4a, 0x50, 0x2a, 0xda, 0x82, 0xea, 0x33, 0x8a, 0x1c, 0x3c, + 0x1d, 0xdd, 0x57, 0x02, 0x9d, 0x7c, 0x7d, 0x5a, 0x38, 0x95, 0x2e, 0x3a, 0x60, 0xe5, 0xa5, 0x0a, + 0x64, 0x33, 0x13, 0x34, 0xf8, 0x2f, 0x8d, 0x8d, 0x3f, 0x37, 0x1e, 0xc1, 0xd1, 0x90, 0xbf, 0x44, + 0x4b, 0xe1, 0xcd, 0x71, 0x1c, 0x2d, 0x78, 0x29, 0xde, 0x86, 0x7f, 0x18, 0x79, 0x7e, 0x88, 0x73, + 0x1d, 0x6e, 0xb2, 0xed, 0xe3, 0x16, 0xc5, 0x28, 0x50, 0xae, 0xdb, 0xeb, 0x2f, 0xa7, 0xb2, 0xde, + 0x38, 0xe4, 0x63, 0xe3, 0x90, 0xcf, 0x8d, 0x43, 0xde, 0xbe, 0x9d, 0x8a, 0x5f, 0xd7, 0xff, 0xd3, + 0xd5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x93, 0x7d, 0x0b, 0x87, 0x02, 0x00, 0x00, +} + +func (m *RaftAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RaftAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.IsLearner { + i-- + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.PeerUrls) > 0 { + for iNdEx := len(m.PeerUrls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PeerUrls[iNdEx]) + copy(dAtA[i:], m.PeerUrls[iNdEx]) + i = encodeVarintMembership(dAtA, i, uint64(len(m.PeerUrls[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Attributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Attributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Attributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ClientUrls) > 0 { + for iNdEx := len(m.ClientUrls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClientUrls[iNdEx]) + copy(dAtA[i:], m.ClientUrls[iNdEx]) + i = encodeVarintMembership(dAtA, i, uint64(len(m.ClientUrls[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMembership(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Member) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Member) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MemberAttributes != nil { + { + size, err := m.MemberAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintMembership(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.RaftAttributes != nil { + { + size, err := m.RaftAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMembership(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ID != 0 { + i = encodeVarintMembership(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ClusterVersionSetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterVersionSetRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterVersionSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ver) > 0 { + i -= len(m.Ver) + copy(dAtA[i:], m.Ver) + i = encodeVarintMembership(dAtA, i, uint64(len(m.Ver))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterMemberAttrSetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterMemberAttrSetRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterMemberAttrSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MemberAttributes != nil { + { + size, err := m.MemberAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMembership(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Member_ID != 0 { + i = encodeVarintMembership(dAtA, i, uint64(m.Member_ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DowngradeInfoSetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DowngradeInfoSetRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DowngradeInfoSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ver) > 0 { + i -= len(m.Ver) + copy(dAtA[i:], m.Ver) + i = encodeVarintMembership(dAtA, i, uint64(len(m.Ver))) + i-- + dAtA[i] = 0x12 + } + if m.Enabled { + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintMembership(dAtA []byte, offset int, v uint64) int { + offset -= sovMembership(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RaftAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PeerUrls) > 0 { + for _, s := range m.PeerUrls { + l = len(s) + n 
+= 1 + l + sovMembership(uint64(l)) + } + } + if m.IsLearner { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Attributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMembership(uint64(l)) + } + if len(m.ClientUrls) > 0 { + for _, s := range m.ClientUrls { + l = len(s) + n += 1 + l + sovMembership(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Member) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != 0 { + n += 1 + sovMembership(uint64(m.ID)) + } + if m.RaftAttributes != nil { + l = m.RaftAttributes.Size() + n += 1 + l + sovMembership(uint64(l)) + } + if m.MemberAttributes != nil { + l = m.MemberAttributes.Size() + n += 1 + l + sovMembership(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ClusterVersionSetRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ver) + if l > 0 { + n += 1 + l + sovMembership(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ClusterMemberAttrSetRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Member_ID != 0 { + n += 1 + sovMembership(uint64(m.Member_ID)) + } + if m.MemberAttributes != nil { + l = m.MemberAttributes.Size() + n += 1 + l + sovMembership(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DowngradeInfoSetRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Enabled { + n += 2 + } + l = len(m.Ver) + if l > 0 { + n += 1 + l + sovMembership(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMembership(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMembership(x uint64) (n int) { + return sovMembership(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RaftAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerUrls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMembership + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMembership + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerUrls = append(m.PeerUrls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMembership(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMembership + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Attributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Attributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Attributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMembership + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMembership + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientUrls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMembership + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMembership + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientUrls = append(m.ClientUrls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMembership(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMembership + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Member) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Member: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMembership + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMembership + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RaftAttributes == nil { + m.RaftAttributes = &RaftAttributes{} + } + if err := m.RaftAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMembership + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMembership + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MemberAttributes == nil { + m.MemberAttributes = &Attributes{} + } + if err := m.MemberAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMembership(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMembership + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterVersionSetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterVersionSetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterVersionSetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMembership + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMembership + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMembership(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMembership + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterMemberAttrSetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterMemberAttrSetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterMemberAttrSetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Member_ID", wireType) + } + m.Member_ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Member_ID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMembership + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMembership + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MemberAttributes == nil { + m.MemberAttributes = &Attributes{} + } + if err := m.MemberAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMembership(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMembership + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DowngradeInfoSetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DowngradeInfoSetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DowngradeInfoSetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Enabled = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMembership + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMembership + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMembership + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMembership(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMembership + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMembership(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMembership + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMembership + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMembership + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMembership + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMembership + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMembership + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMembership = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMembership = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMembership = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto new file mode 100644 index 0000000000..e63e9ecc99 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; +package membershippb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +// RaftAttributes represents the raft related attributes of an etcd member. +message RaftAttributes { + // peerURLs is the list of peers in the raft cluster. + repeated string peer_urls = 1; + // isLearner indicates if the member is raft learner. + bool is_learner = 2; +} + +// Attributes represents all the non-raft related attributes of an etcd member. +message Attributes { + string name = 1; + repeated string client_urls = 2; +} + +message Member { + uint64 ID = 1; + RaftAttributes raft_attributes = 2; + Attributes member_attributes = 3; +} + +message ClusterVersionSetRequest { + string ver = 1; +} + +message ClusterMemberAttrSetRequest { + uint64 member_ID = 1; + Attributes member_attributes = 2; +} + +message DowngradeInfoSetRequest { + bool enabled = 1; + string ver = 2; +} \ No newline at end of file diff --git a/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go new file mode 100644 index 0000000000..fc258d6c20 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go @@ -0,0 +1,798 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: kv.proto + +package mvccpb + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Event_EventType int32 + +const ( + PUT Event_EventType = 0 + DELETE Event_EventType = 1 +) + +var Event_EventType_name = map[int32]string{ + 0: "PUT", + 1: "DELETE", +} + +var Event_EventType_value = map[string]int32{ + "PUT": 0, + "DELETE": 1, +} + +func (x Event_EventType) String() string { + return proto.EnumName(Event_EventType_name, int32(x)) +} + +func (Event_EventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_2216fe83c9c12408, []int{1, 0} +} + +type KeyValue struct { + // key is the key in bytes. An empty key is not allowed. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // create_revision is the revision of last creation on this key. + CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"` + // mod_revision is the revision of last modification on this key. + ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"` + // version is the version of the key. A deletion resets + // the version to zero and any modification of the key + // increases its version. + Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` + // value is the value held by the key, in bytes. + Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` + // lease is the ID of the lease that attached to key. + // When the attached lease expires, the key will be deleted. + // If lease is 0, then no lease is attached to the key. + Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_2216fe83c9c12408, []int{0} +} +func (m *KeyValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *KeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyValue.Merge(m, src) +} +func (m *KeyValue) XXX_Size() int { + return m.Size() +} +func (m *KeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_KeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyValue proto.InternalMessageInfo + +type Event struct { + // type is the kind of event. If type is a PUT, it indicates + // new data has been stored to the key. 
If type is a DELETE, + // it indicates the key was deleted. + Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"` + // kv holds the KeyValue for the event. + // A PUT event contains current kv pair. + // A PUT event with kv.Version=1 indicates the creation of a key. + // A DELETE/EXPIRE event contains the deleted key with + // its modification revision set to the revision of deletion. + Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"` + // prev_kv holds the key-value pair before the event happens. + PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_2216fe83c9c12408, []int{1} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Event.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) + proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue") + proto.RegisterType((*Event)(nil), "mvccpb.Event") +} + +func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) } + +var fileDescriptor_2216fe83c9c12408 = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, + 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, + 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, + 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, + 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, + 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, + 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, + 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, + 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, + 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, + 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, + 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, + 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, + 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 
0x53, 0xf9, 0xff, 0x30, + 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, + 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, + 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, + 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, +} + +func (m *KeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Lease != 0 { + i = encodeVarintKv(dAtA, i, uint64(m.Lease)) + i-- + dAtA[i] = 0x30 + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x2a + } + if m.Version != 0 { + i = encodeVarintKv(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x20 + } + if m.ModRevision != 0 { + i = encodeVarintKv(dAtA, i, uint64(m.ModRevision)) + i-- + dAtA[i] = 0x18 + } + if m.CreateRevision != 0 { + i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision)) + i-- + dAtA[i] = 0x10 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.PrevKv != nil { + { + size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintKv(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Kv != nil { + { + size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintKv(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintKv(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintKv(dAtA []byte, offset int, v uint64) int { + offset -= sovKv(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *KeyValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + if m.CreateRevision != 0 { + n += 1 + sovKv(uint64(m.CreateRevision)) + } + if m.ModRevision != 0 { + n += 1 + sovKv(uint64(m.ModRevision)) + } + if m.Version != 0 { + n += 1 + sovKv(uint64(m.Version)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovKv(uint64(m.Lease)) + } + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } + return n +} + +func (m *Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovKv(uint64(m.Type)) + } + if m.Kv != nil { + l = m.Kv.Size() + n += 1 + l + sovKv(uint64(l)) + } + if m.PrevKv != nil { + l = m.PrevKv.Size() + n += 1 + l + sovKv(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovKv(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozKv(x uint64) (n int) { + return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *KeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthKv + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) + } + m.CreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreateRevision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) + } + m.ModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ModRevision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthKv + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Event_EventType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthKv + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kv == nil { + m.Kv = &KeyValue{} + } + if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthKv + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrevKv == nil { + m.PrevKv = &KeyValue{} + } + if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipKv(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthKv + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupKv + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthKv + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupKv = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto new file mode 100644 index 0000000000..23c911b7da --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto @@ -0,0 +1,49 @@ +syntax = "proto3"; +package mvccpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +message KeyValue { + // key is the key in bytes. An empty key is not allowed. + bytes key = 1; + // create_revision is the revision of last creation on this key. + int64 create_revision = 2; + // mod_revision is the revision of last modification on this key. + int64 mod_revision = 3; + // version is the version of the key. A deletion resets + // the version to zero and any modification of the key + // increases its version. + int64 version = 4; + // value is the value held by the key, in bytes. + bytes value = 5; + // lease is the ID of the lease that attached to key. + // When the attached lease expires, the key will be deleted. + // If lease is 0, then no lease is attached to the key. + int64 lease = 6; +} + +message Event { + enum EventType { + PUT = 0; + DELETE = 1; + } + // type is the kind of event. If type is a PUT, it indicates + // new data has been stored to the key. If type is a DELETE, + // it indicates the key was deleted. + EventType type = 1; + // kv holds the KeyValue for the event. + // A PUT event contains current kv pair. + // A PUT event with kv.Version=1 indicates the creation of a key. + // A DELETE/EXPIRE event contains the deleted key with + // its modification revision set to the revision of deletion. + KeyValue kv = 2; + + // prev_kv holds the key-value pair before the event happens. 
+ KeyValue prev_kv = 3; +} diff --git a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/doc.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/doc.go new file mode 100644 index 0000000000..f72c6a644f --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction. +package rpctypes diff --git a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go new file mode 100644 index 0000000000..23201302e8 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go @@ -0,0 +1,267 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rpctypes + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// server-side error +var ( + ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err() + ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err() + ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err() + ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err() + ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err() + ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err() + ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err() + ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err() + ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err() + + ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err() + ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err() + ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err() + + ErrGRPCWatchCanceled = status.New(codes.Canceled, "etcdserver: watch canceled").Err() + + ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err() + ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err() + ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err() + ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err() + ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err() + ErrGRPCMemberNotLearner = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member").Err() + ErrGRPCLearnerNotReady = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader").Err() + ErrGRPCTooManyLearners = status.New(codes.FailedPrecondition, "etcdserver: too many learner members in cluster").Err() + ErrGRPCClusterIdMismatch = status.New(codes.FailedPrecondition, "etcdserver: cluster ID mismatch").Err() + + ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err() + ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err() + + ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err() + ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err() + ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err() + ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err() + ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err() + ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err() + ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err() + ErrGRPCRoleEmpty = status.New(codes.InvalidArgument, 
"etcdserver: role name is empty").Err() + ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err() + ErrGRPCPermissionNotGiven = status.New(codes.InvalidArgument, "etcdserver: permission not given").Err() + ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err() + ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err() + ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err() + ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err() + ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err() + ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err() + ErrGRPCAuthOldRevision = status.New(codes.InvalidArgument, "etcdserver: revision of auth store is old").Err() + + ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err() + ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err() + ErrGRPCLeaderChanged = status.New(codes.Unavailable, "etcdserver: leader changed").Err() + ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err() + ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err() + ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err() + ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err() + ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err() + ErrGRPCTimeoutWaitAppliedIndex = status.New(codes.Unavailable, "etcdserver: request timed out, waiting for the applied index took too long").Err() + ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err() + ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err() + ErrGPRCNotSupportedForLearner = status.New(codes.Unavailable, "etcdserver: rpc not supported for learner").Err() + ErrGRPCBadLeaderTransferee = status.New(codes.FailedPrecondition, "etcdserver: bad leader transferee").Err() + + ErrGRPCClusterVersionUnavailable = status.New(codes.Unavailable, "etcdserver: cluster version not found during downgrade").Err() + ErrGRPCWrongDowngradeVersionFormat = status.New(codes.InvalidArgument, "etcdserver: wrong downgrade target version format").Err() + ErrGRPCInvalidDowngradeTargetVersion = status.New(codes.InvalidArgument, "etcdserver: invalid downgrade target version").Err() + ErrGRPCDowngradeInProcess = status.New(codes.FailedPrecondition, "etcdserver: cluster has a downgrade job in progress").Err() + ErrGRPCNoInflightDowngrade = status.New(codes.FailedPrecondition, "etcdserver: no inflight downgrade job").Err() + + ErrGRPCCanceled = status.New(codes.Canceled, "etcdserver: request canceled").Err() + ErrGRPCDeadlineExceeded = status.New(codes.DeadlineExceeded, "etcdserver: context deadline exceeded").Err() + + errStringToError = map[string]error{ + ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, + ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, + ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, + ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, + + ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, + 
ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, + ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, + ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev, + ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace, + + ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound, + ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist, + ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge, + + ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist, + ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist, + ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted, + ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs, + ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound, + ErrorDesc(ErrGRPCMemberNotLearner): ErrGRPCMemberNotLearner, + ErrorDesc(ErrGRPCLearnerNotReady): ErrGRPCLearnerNotReady, + ErrorDesc(ErrGRPCTooManyLearners): ErrGRPCTooManyLearners, + ErrorDesc(ErrGRPCClusterIdMismatch): ErrGRPCClusterIdMismatch, + + ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, + ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, + + ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist, + ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist, + ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist, + ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty, + ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound, + ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist, + ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound, + ErrorDesc(ErrGRPCRoleEmpty): ErrGRPCRoleEmpty, + ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed, + ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied, + ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted, + ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, + ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, + ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, + ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, + ErrorDesc(ErrGRPCAuthOldRevision): ErrGRPCAuthOldRevision, + + ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, + ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, + ErrorDesc(ErrGRPCLeaderChanged): ErrGRPCLeaderChanged, + ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, + ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, + ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, + ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail, + ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, + ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, + ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt, + ErrorDesc(ErrGPRCNotSupportedForLearner): ErrGPRCNotSupportedForLearner, + ErrorDesc(ErrGRPCBadLeaderTransferee): ErrGRPCBadLeaderTransferee, + + ErrorDesc(ErrGRPCClusterVersionUnavailable): ErrGRPCClusterVersionUnavailable, + ErrorDesc(ErrGRPCWrongDowngradeVersionFormat): ErrGRPCWrongDowngradeVersionFormat, + ErrorDesc(ErrGRPCInvalidDowngradeTargetVersion): ErrGRPCInvalidDowngradeTargetVersion, + ErrorDesc(ErrGRPCDowngradeInProcess): ErrGRPCDowngradeInProcess, + ErrorDesc(ErrGRPCNoInflightDowngrade): ErrGRPCNoInflightDowngrade, + } +) + +// client-side error +var ( + ErrEmptyKey = Error(ErrGRPCEmptyKey) + ErrKeyNotFound = Error(ErrGRPCKeyNotFound) + ErrValueProvided = Error(ErrGRPCValueProvided) + ErrLeaseProvided = Error(ErrGRPCLeaseProvided) + ErrTooManyOps = Error(ErrGRPCTooManyOps) + ErrDuplicateKey = Error(ErrGRPCDuplicateKey) + ErrCompacted = Error(ErrGRPCCompacted) + ErrFutureRev = Error(ErrGRPCFutureRev) + ErrNoSpace = Error(ErrGRPCNoSpace) + + ErrLeaseNotFound = 
Error(ErrGRPCLeaseNotFound) + ErrLeaseExist = Error(ErrGRPCLeaseExist) + ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge) + + ErrMemberExist = Error(ErrGRPCMemberExist) + ErrPeerURLExist = Error(ErrGRPCPeerURLExist) + ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted) + ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs) + ErrMemberNotFound = Error(ErrGRPCMemberNotFound) + ErrMemberNotLearner = Error(ErrGRPCMemberNotLearner) + ErrMemberLearnerNotReady = Error(ErrGRPCLearnerNotReady) + ErrTooManyLearners = Error(ErrGRPCTooManyLearners) + + ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge) + ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests) + + ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist) + ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist) + ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist) + ErrUserEmpty = Error(ErrGRPCUserEmpty) + ErrUserNotFound = Error(ErrGRPCUserNotFound) + ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist) + ErrRoleNotFound = Error(ErrGRPCRoleNotFound) + ErrRoleEmpty = Error(ErrGRPCRoleEmpty) + ErrAuthFailed = Error(ErrGRPCAuthFailed) + ErrPermissionDenied = Error(ErrGRPCPermissionDenied) + ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted) + ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) + ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) + ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) + ErrAuthOldRevision = Error(ErrGRPCAuthOldRevision) + ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) + ErrClusterIdMismatch = Error(ErrGRPCClusterIdMismatch) + + ErrNoLeader = Error(ErrGRPCNoLeader) + ErrNotLeader = Error(ErrGRPCNotLeader) + ErrLeaderChanged = Error(ErrGRPCLeaderChanged) + ErrNotCapable = Error(ErrGRPCNotCapable) + ErrStopped = Error(ErrGRPCStopped) + ErrTimeout = Error(ErrGRPCTimeout) + ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail) + ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) + ErrTimeoutWaitAppliedIndex = Error(ErrGRPCTimeoutWaitAppliedIndex) + ErrUnhealthy = Error(ErrGRPCUnhealthy) + ErrCorrupt = Error(ErrGRPCCorrupt) + ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee) + + ErrClusterVersionUnavailable = Error(ErrGRPCClusterVersionUnavailable) + ErrWrongDowngradeVersionFormat = Error(ErrGRPCWrongDowngradeVersionFormat) + ErrInvalidDowngradeTargetVersion = Error(ErrGRPCInvalidDowngradeTargetVersion) + ErrDowngradeInProcess = Error(ErrGRPCDowngradeInProcess) + ErrNoInflightDowngrade = Error(ErrGRPCNoInflightDowngrade) +) + +// EtcdError defines gRPC server errors. +// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323) +type EtcdError struct { + code codes.Code + desc string +} + +// Code returns grpc/codes.Code. +// TODO: define clientv3/codes.Code. 
+func (e EtcdError) Code() codes.Code { + return e.code +} + +func (e EtcdError) Error() string { + return e.desc +} + +func Error(err error) error { + if err == nil { + return nil + } + verr, ok := errStringToError[ErrorDesc(err)] + if !ok { // not gRPC error + return err + } + ev, ok := status.FromError(verr) + var desc string + if ok { + desc = ev.Message() + } else { + desc = verr.Error() + } + return EtcdError{code: ev.Code(), desc: desc} +} + +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/md.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/md.go new file mode 100644 index 0000000000..90b8b835b1 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/md.go @@ -0,0 +1,22 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpctypes + +var ( + MetadataRequireLeaderKey = "hasleader" + MetadataHasLeader = "true" + + MetadataClientAPIVersionKey = "client-api-version" +) diff --git a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go new file mode 100644 index 0000000000..8f8ac60ff2 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go @@ -0,0 +1,20 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpctypes + +var ( + TokenFieldNameGRPC = "token" + TokenFieldNameSwagger = "authorization" +) diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go new file mode 100644 index 0000000000..52be9f964f --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -0,0 +1,56 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package version implements etcd version parsing and contains latest version +// information. 
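A brief aside before the version package body: the metadata keys defined in md.go above are how a caller opts into fail-fast behavior when the cluster has no leader. A minimal sketch, assuming only the vendored rpctypes package above (attaching the context to a real RPC is left out):

```go
package main

import (
	"context"

	"google.golang.org/grpc/metadata"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)

func main() {
	// With "hasleader"="true" in the outgoing metadata, etcd's server-side
	// interceptor rejects the RPC (ErrGRPCNoLeader) instead of serving it
	// while the member has no leader.
	ctx := metadata.AppendToOutgoingContext(context.Background(),
		rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	_ = ctx // hand ctx to any etcd RPC, e.g. a long-lived Watch stream
}
```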
+package version + +import ( + "fmt" + "strings" + + "github.com/coreos/go-semver/semver" +) + +var ( + // MinClusterVersion is the min cluster version this etcd binary is compatible with. + MinClusterVersion = "3.0.0" + Version = "3.5.11" + APIVersion = "unknown" + + // Git SHA Value will be set during build + GitSHA = "Not provided (use ./build instead of go build)" +) + +func init() { + ver, err := semver.NewVersion(Version) + if err == nil { + APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) + } +} + +type Versions struct { + Server string `json:"etcdserver"` + Cluster string `json:"etcdcluster"` + // TODO: raft state machine version +} + +// Cluster only keeps the major.minor. +func Cluster(v string) string { + vs := strings.Split(v, ".") + if len(vs) <= 2 { + return v + } + return fmt.Sprintf("%s.%s", vs[0], vs[1]) +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/LICENSE b/vendor/go.etcd.io/etcd/client/pkg/v3/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/doc.go new file mode 100644 index 0000000000..e919f24993 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package logutil includes utilities to facilitate logging. 
+package logutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_level.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_level.go new file mode 100644 index 0000000000..6c95bcfe9f --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_level.go @@ -0,0 +1,30 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "go.uber.org/zap/zapcore" +) + +var DefaultLogLevel = "info" + +// ConvertToZapLevel converts log level string to zapcore.Level. +func ConvertToZapLevel(lvl string) zapcore.Level { + var level zapcore.Level + if err := level.Set(lvl); err != nil { + panic(err) + } + return level +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go new file mode 100644 index 0000000000..34f35b9f28 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go @@ -0,0 +1,108 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "sort" + "time" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// CreateDefaultZapLogger creates a logger with default zap configuration +func CreateDefaultZapLogger(level zapcore.Level) (*zap.Logger, error) { + lcfg := DefaultZapLoggerConfig + lcfg.Level = zap.NewAtomicLevelAt(level) + c, err := lcfg.Build() + if err != nil { + return nil, err + } + return c, nil +} + +// DefaultZapLoggerConfig defines default zap logger configuration. 
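Before the config itself, a quick usage sketch for the two logutil helpers just defined. Note that `ConvertToZapLevel` panics on an unrecognized level string, so only validated values should reach it; the sketch uses only the APIs above:

```go
package main

import (
	"go.etcd.io/etcd/client/pkg/v3/logutil"
)

func main() {
	// DefaultLogLevel is "info"; any invalid string would panic here.
	lvl := logutil.ConvertToZapLevel(logutil.DefaultLogLevel)

	// CreateDefaultZapLogger copies DefaultZapLoggerConfig (defined next)
	// and overrides only the level.
	lg, err := logutil.CreateDefaultZapLogger(lvl)
	if err != nil {
		panic(err)
	}
	defer lg.Sync()
	lg.Info("etcd client logger ready")
}
```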
+var DefaultZapLoggerConfig = zap.Config{ + Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)), + + Development: false, + Sampling: &zap.SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + + Encoding: "json", + + // copied from "zap.NewProductionEncoderConfig" with some updates + EncoderConfig: zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + + // Custom EncodeTime function to ensure we match format and precision of historic capnslog timestamps + EncodeTime: func(t time.Time, enc zapcore.PrimitiveArrayEncoder) { + enc.AppendString(t.Format("2006-01-02T15:04:05.999999Z0700")) + }, + + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + }, + + // Use "/dev/null" to discard all + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, +} + +// MergeOutputPaths merges logging output paths, resolving conflicts. +func MergeOutputPaths(cfg zap.Config) zap.Config { + outputs := make(map[string]struct{}) + for _, v := range cfg.OutputPaths { + outputs[v] = struct{}{} + } + outputSlice := make([]string, 0) + if _, ok := outputs["/dev/null"]; ok { + // "/dev/null" to discard all + outputSlice = []string{"/dev/null"} + } else { + for k := range outputs { + outputSlice = append(outputSlice, k) + } + } + cfg.OutputPaths = outputSlice + sort.Strings(cfg.OutputPaths) + + errOutputs := make(map[string]struct{}) + for _, v := range cfg.ErrorOutputPaths { + errOutputs[v] = struct{}{} + } + errOutputSlice := make([]string, 0) + if _, ok := errOutputs["/dev/null"]; ok { + // "/dev/null" to discard all + errOutputSlice = []string{"/dev/null"} + } else { + for k := range errOutputs { + errOutputSlice = append(errOutputSlice, k) + } + } + cfg.ErrorOutputPaths = errOutputSlice + sort.Strings(cfg.ErrorOutputPaths) + + return cfg +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go new file mode 100644 index 0000000000..9daa3e0aab --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go @@ -0,0 +1,93 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package logutil + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + + "go.etcd.io/etcd/client/pkg/v3/systemd" + + "github.com/coreos/go-systemd/v22/journal" + "go.uber.org/zap/zapcore" +) + +// NewJournalWriter wraps "io.Writer" to redirect log output +// to the local systemd journal. If journald send fails, it falls +// back to writing to the original writer. +// The decode overhead is only <30µs per write. 
+// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go +func NewJournalWriter(wr io.Writer) (io.Writer, error) { + return &journalWriter{Writer: wr}, systemd.DialJournal() +} + +type journalWriter struct { + io.Writer +} + +// WARN: assume that etcd uses default field names in zap encoder config +// make sure to keep this up-to-date! +type logLine struct { + Level string `json:"level"` + Caller string `json:"caller"` +} + +func (w *journalWriter) Write(p []byte) (int, error) { + line := &logLine{} + if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil { + return 0, err + } + + var pri journal.Priority + switch line.Level { + case zapcore.DebugLevel.String(): + pri = journal.PriDebug + case zapcore.InfoLevel.String(): + pri = journal.PriInfo + + case zapcore.WarnLevel.String(): + pri = journal.PriWarning + case zapcore.ErrorLevel.String(): + pri = journal.PriErr + + case zapcore.DPanicLevel.String(): + pri = journal.PriCrit + case zapcore.PanicLevel.String(): + pri = journal.PriCrit + case zapcore.FatalLevel.String(): + pri = journal.PriCrit + + default: + panic(fmt.Errorf("unknown log level: %q", line.Level)) + } + + err := journal.Send(string(p), pri, map[string]string{ + "PACKAGE": filepath.Dir(line.Caller), + "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), + }) + if err != nil { + // "journal" also falls back to stderr + // "fmt.Fprintln(os.Stderr, s)" + return w.Writer.Write(p) + } + return 0, nil +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/doc.go new file mode 100644 index 0000000000..30e77ce044 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package systemd provides utility functions for systemd. +package systemd diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/journal.go b/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/journal.go new file mode 100644 index 0000000000..494ce372e7 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/journal.go @@ -0,0 +1,29 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package systemd + +import "net" + +// DialJournal returns no error if the process can dial journal socket. +// Returns an error if dial failed, which indicates journald is not available +// (e.g. run embedded etcd as docker daemon). 
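A hedged sketch of the journal writer above (non-Windows only, per the build tags); `DialJournal`, whose definition continues below, is what `NewJournalWriter` uses to probe journald. The JSON payload mimics the zap field names the writer expects:

```go
package main

import (
	"fmt"
	"os"

	"go.etcd.io/etcd/client/pkg/v3/logutil"
)

func main() {
	// The error only reports whether journald could be dialed; the writer
	// is usable either way and falls back to the wrapped io.Writer.
	w, err := logutil.NewJournalWriter(os.Stderr)
	if err != nil {
		fmt.Fprintln(os.Stderr, "journald unavailable:", err)
	}
	// Write decodes the "level" and "caller" fields to pick a priority.
	_, _ = w.Write([]byte(`{"level":"info","caller":"app/main.go:10","msg":"hi"}` + "\n"))
}
```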
+// Reference: https://github.com/coreos/go-systemd/blob/master/journal/journal.go. +func DialJournal() error { + conn, err := net.Dial("unixgram", "/run/systemd/journal/socket") + if conn != nil { + defer conn.Close() + } + return err +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go new file mode 100644 index 0000000000..e1f21755d4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go @@ -0,0 +1,56 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import ( + "crypto/tls" + "fmt" +) + +// GetCipherSuite returns the corresponding cipher suite, +// and boolean value if it is supported. +func GetCipherSuite(s string) (uint16, bool) { + for _, c := range tls.CipherSuites() { + if s == c.Name { + return c.ID, true + } + } + for _, c := range tls.InsecureCipherSuites() { + if s == c.Name { + return c.ID, true + } + } + switch s { + case "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": + return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, true + case "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": + return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, true + } + return 0, false +} + +// GetCipherSuites returns list of corresponding cipher suite IDs. +func GetCipherSuites(ss []string) ([]uint16, error) { + cs := make([]uint16, len(ss)) + for i, s := range ss { + var ok bool + cs[i], ok = GetCipherSuite(s) + if !ok { + return nil, fmt.Errorf("unexpected TLS cipher suite %q", s) + } + } + + return cs, nil +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/doc.go new file mode 100644 index 0000000000..3b6aa670ba --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tlsutil provides utility functions for handling TLS. +package tlsutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go new file mode 100644 index 0000000000..3a5aef089a --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go @@ -0,0 +1,73 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "io/ioutil" +) + +// NewCertPool creates x509 certPool with provided CA files. +func NewCertPool(CAFiles []string) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, CAFile := range CAFiles { + pemByte, err := ioutil.ReadFile(CAFile) + if err != nil { + return nil, err + } + + for { + var block *pem.Block + block, pemByte = pem.Decode(pemByte) + if block == nil { + break + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + + certPool.AddCert(cert) + } + } + + return certPool, nil +} + +// NewCert generates TLS cert by using the given cert,key and parse function. +func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) { + cert, err := ioutil.ReadFile(certfile) + if err != nil { + return nil, err + } + + key, err := ioutil.ReadFile(keyfile) + if err != nil { + return nil, err + } + + if parseFunc == nil { + parseFunc = tls.X509KeyPair + } + + tlsCert, err := parseFunc(cert, key) + if err != nil { + return nil, err + } + return &tlsCert, nil +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go new file mode 100644 index 0000000000..ffcecd8c67 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go @@ -0,0 +1,47 @@ +// Copyright 2023 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import ( + "crypto/tls" + "fmt" +) + +type TLSVersion string + +// Constants for TLS versions. +const ( + TLSVersionDefault TLSVersion = "" + TLSVersion12 TLSVersion = "TLS1.2" + TLSVersion13 TLSVersion = "TLS1.3" +) + +// GetTLSVersion returns the corresponding tls.Version or error. +func GetTLSVersion(version string) (uint16, error) { + var v uint16 + + switch version { + case string(TLSVersionDefault): + v = 0 // 0 means let Go decide. 
+ case string(TLSVersion12): + v = tls.VersionTLS12 + case string(TLSVersion13): + v = tls.VersionTLS13 + default: + return 0, fmt.Errorf("unexpected TLS version %q (must be one of: TLS1.2, TLS1.3)", version) + } + + return v, nil +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/doc.go new file mode 100644 index 0000000000..de8ef0bd71 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package types declares various data types and implements type-checking +// functions. +package types diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go new file mode 100644 index 0000000000..ae00388dde --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go @@ -0,0 +1,39 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import "strconv" + +// ID represents a generic identifier which is canonically +// stored as a uint64 but is typically represented as a +// base-16 string for input/output +type ID uint64 + +func (i ID) String() string { + return strconv.FormatUint(uint64(i), 16) +} + +// IDFromString attempts to create an ID from a base-16 string. +func IDFromString(s string) (ID, error) { + i, err := strconv.ParseUint(s, 16, 64) + return ID(i), err +} + +// IDSlice implements the sort interface +type IDSlice []ID + +func (p IDSlice) Len() int { return len(p) } +func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } +func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go new file mode 100644 index 0000000000..e7a3cdc9ab --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go @@ -0,0 +1,195 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + "sort" + "sync" +) + +type Set interface { + Add(string) + Remove(string) + Contains(string) bool + Equals(Set) bool + Length() int + Values() []string + Copy() Set + Sub(Set) Set +} + +func NewUnsafeSet(values ...string) *unsafeSet { + set := &unsafeSet{make(map[string]struct{})} + for _, v := range values { + set.Add(v) + } + return set +} + +func NewThreadsafeSet(values ...string) *tsafeSet { + us := NewUnsafeSet(values...) + return &tsafeSet{us, sync.RWMutex{}} +} + +type unsafeSet struct { + d map[string]struct{} +} + +// Add adds a new value to the set (no-op if the value is already present) +func (us *unsafeSet) Add(value string) { + us.d[value] = struct{}{} +} + +// Remove removes the given value from the set +func (us *unsafeSet) Remove(value string) { + delete(us.d, value) +} + +// Contains returns whether the set contains the given value +func (us *unsafeSet) Contains(value string) (exists bool) { + _, exists = us.d[value] + return exists +} + +// ContainsAll returns whether the set contains all given values +func (us *unsafeSet) ContainsAll(values []string) bool { + for _, s := range values { + if !us.Contains(s) { + return false + } + } + return true +} + +// Equals returns whether the contents of two sets are identical +func (us *unsafeSet) Equals(other Set) bool { + v1 := sort.StringSlice(us.Values()) + v2 := sort.StringSlice(other.Values()) + v1.Sort() + v2.Sort() + return reflect.DeepEqual(v1, v2) +} + +// Length returns the number of elements in the set +func (us *unsafeSet) Length() int { + return len(us.d) +} + +// Values returns the values of the Set in an unspecified order. +func (us *unsafeSet) Values() (values []string) { + values = make([]string, 0) + for val := range us.d { + values = append(values, val) + } + return values +} + +// Copy creates a new Set containing the values of the first +func (us *unsafeSet) Copy() Set { + cp := NewUnsafeSet() + for val := range us.d { + cp.Add(val) + } + + return cp +} + +// Sub removes all elements in other from the set +func (us *unsafeSet) Sub(other Set) Set { + oValues := other.Values() + result := us.Copy().(*unsafeSet) + + for _, val := range oValues { + if _, ok := result.d[val]; !ok { + continue + } + delete(result.d, val) + } + + return result +} + +type tsafeSet struct { + us *unsafeSet + m sync.RWMutex +} + +func (ts *tsafeSet) Add(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Add(value) +} + +func (ts *tsafeSet) Remove(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Remove(value) +} + +func (ts *tsafeSet) Contains(value string) (exists bool) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Contains(value) +} + +func (ts *tsafeSet) Equals(other Set) bool { + ts.m.RLock() + defer ts.m.RUnlock() + + // If ts and other represent the same variable, avoid calling + // ts.us.Equals(other), to avoid double RLock bug + if _other, ok := other.(*tsafeSet); ok { + if _other == ts { + return true + } + } + return ts.us.Equals(other) +} + +func (ts *tsafeSet) Length() int { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Length() +} + +func (ts *tsafeSet) Values() (values []string) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Values() +} + +func (ts *tsafeSet) Copy() Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Copy().(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} + +func (ts *tsafeSet) Sub(other Set) 
Set { + ts.m.RLock() + defer ts.m.RUnlock() + + // If ts and other represent the same variable, avoid calling + // ts.us.Sub(other), to avoid double RLock bug + if _other, ok := other.(*tsafeSet); ok { + if _other == ts { + usResult := NewUnsafeSet() + return &tsafeSet{usResult, sync.RWMutex{}} + } + } + usResult := ts.us.Sub(other).(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/slice.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/slice.go new file mode 100644 index 0000000000..0dd9ca798a --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/slice.go @@ -0,0 +1,22 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// Uint64Slice implements sort interface +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go new file mode 100644 index 0000000000..9e5d03ff64 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go @@ -0,0 +1,82 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
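The set types above back etcd's URL and member bookkeeping; a short usage sketch for the thread-safe variant, using only the interface defined above:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/types"
)

func main() {
	peers := types.NewThreadsafeSet("infra0", "infra1", "infra2")
	gone := types.NewThreadsafeSet("infra2")

	peers.Remove("infra1")
	fmt.Println(peers.Contains("infra0")) // true
	fmt.Println(peers.Length())           // 2

	// Sub returns a new Set holding the elements of peers not in gone.
	fmt.Println(peers.Sub(gone).Values()) // [infra0] (order unspecified)
}
```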
+ +package types + +import ( + "errors" + "fmt" + "net" + "net/url" + "sort" + "strings" +) + +type URLs []url.URL + +func NewURLs(strs []string) (URLs, error) { + all := make([]url.URL, len(strs)) + if len(all) == 0 { + return nil, errors.New("no valid URLs given") + } + for i, in := range strs { + in = strings.TrimSpace(in) + u, err := url.Parse(in) + if err != nil { + return nil, err + } + if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { + return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) + } + if _, _, err := net.SplitHostPort(u.Host); err != nil { + return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) + } + if u.Path != "" { + return nil, fmt.Errorf("URL must not contain a path: %s", in) + } + all[i] = *u + } + us := URLs(all) + us.Sort() + + return us, nil +} + +func MustNewURLs(strs []string) URLs { + urls, err := NewURLs(strs) + if err != nil { + panic(err) + } + return urls +} + +func (us URLs) String() string { + return strings.Join(us.StringSlice(), ",") +} + +func (us *URLs) Sort() { + sort.Sort(us) +} +func (us URLs) Len() int { return len(us) } +func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } +func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } + +func (us URLs) StringSlice() []string { + out := make([]string, len(us)) + for i := range us { + out[i] = us[i].String() + } + + return out +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/urlsmap.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/urlsmap.go new file mode 100644 index 0000000000..47690cc381 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/urlsmap.go @@ -0,0 +1,107 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "sort" + "strings" +) + +// URLsMap is a map from a name to its URLs. +type URLsMap map[string]URLs + +// NewURLsMap returns a URLsMap instantiated from the given string, +// which consists of discovery-formatted names-to-URLs, like: +// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2:2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 +func NewURLsMap(s string) (URLsMap, error) { + m := parse(s) + + cl := URLsMap{} + for name, urls := range m { + us, err := NewURLs(urls) + if err != nil { + return nil, err + } + cl[name] = us + } + return cl, nil +} + +// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The +// string values in the map can be multiple values separated by the sep string. +func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { + var err error + um := URLsMap{} + for k, v := range m { + um[k], err = NewURLs(strings.Split(v, sep)) + if err != nil { + return nil, err + } + } + return um, nil +} + +// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. 
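A sketch of parsing the discovery format documented above (the member names and endpoints are illustrative):

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/types"
)

func main() {
	m, err := types.NewURLsMap("infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Len())    // 2
	fmt.Println(m.String()) // canonical form: pairs sorted by name
	fmt.Println(m.URLs())   // every URL, lexicographically sorted
}
```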
+func (c URLsMap) String() string { + var pairs []string + for name, urls := range c { + for _, url := range urls { + pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) + } + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +// URLs returns a list of all URLs. +// The returned list is sorted in ascending lexicographical order. +func (c URLsMap) URLs() []string { + var urls []string + for _, us := range c { + for _, u := range us { + urls = append(urls, u.String()) + } + } + sort.Strings(urls) + return urls +} + +// Len returns the size of URLsMap. +func (c URLsMap) Len() int { + return len(c) +} + +// parse parses the given string and returns a map listing the values specified for each key. +func parse(s string) map[string][]string { + m := make(map[string][]string) + for s != "" { + key := s + if i := strings.IndexAny(key, ","); i >= 0 { + key, s = key[:i], key[i+1:] + } else { + s = "" + } + if key == "" { + continue + } + value := "" + if i := strings.Index(key, "="); i >= 0 { + key, value = key[:i], key[i+1:] + } + m[key] = append(m[key], value) + } + return m +} diff --git a/vendor/go.etcd.io/etcd/client/v3/LICENSE b/vendor/go.etcd.io/etcd/client/v3/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.etcd.io/etcd/client/v3/README.md b/vendor/go.etcd.io/etcd/client/v3/README.md new file mode 100644 index 0000000000..1e037d7eb6 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/README.md @@ -0,0 +1,92 @@ +# etcd/clientv3 + +[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs) +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3) + +`etcd/clientv3` is the official Go etcd client for v3. + +## Install + +```bash +go get go.etcd.io/etcd/client/v3 +``` + +Warning: As etcd 3.5.0 was not yet released, the command above does not work. 
+After the first pre-release of 3.5.0 ([#12498](https://github.com/etcd-io/etcd/issues/12498)),
+etcd can be referenced using:
+```
+go get go.etcd.io/etcd/client/v3@v3.5.0-pre
+```
+
+## Get started
+
+Create a client using `clientv3.New`:
+
+```go
+cli, err := clientv3.New(clientv3.Config{
+	Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
+	DialTimeout: 5 * time.Second,
+})
+if err != nil {
+	// handle error!
+}
+defer cli.Close()
+```
+
+etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls, and `clientv3` uses
+[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it;
+if the client is not closed, the connection will leak goroutines. To specify a client request timeout,
+pass a `context.WithTimeout` context to the APIs:
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), timeout)
+resp, err := cli.Put(ctx, "sample_key", "sample_value")
+cancel()
+if err != nil {
+	// handle error!
+}
+// use the response
+```
+
+For full compatibility, it is recommended to install released versions of clients using Go modules.
+
+## Error Handling
+
+The etcd client returns two types of errors:
+
+1. context error: canceled or deadline exceeded.
+2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/api/v3rpc/rpctypes).
+
+Here is example code for handling client errors:
+
+```go
+resp, err := cli.Put(ctx, "", "")
+if err != nil {
+	switch err {
+	case context.Canceled:
+		log.Fatalf("ctx is canceled by another routine: %v", err)
+	case context.DeadlineExceeded:
+		log.Fatalf("ctx deadline is exceeded: %v", err)
+	case rpctypes.ErrEmptyKey:
+		log.Fatalf("client-side error: %v", err)
+	default:
+		log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err)
+	}
+}
+```
+
+## Metrics
+
+The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/main/tests/integration/clientv3/examples/example_metrics_test.go).
+
+## Namespacing
+
+The [namespace](https://godoc.org/go.etcd.io/etcd/client/v3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.
+
+## Request size limit
+
+Client request size limits are configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize`, in bytes. If not set, the send limit defaults to 2 MiB (including gRPC overhead bytes) and the receive limit defaults to `math.MaxInt32`.
+
+## Examples
+
+More code [examples](https://github.com/etcd-io/etcd/tree/main/tests/integration/clientv3/examples) can be found at [GoDoc](https://pkg.go.dev/go.etcd.io/etcd/client/v3).
diff --git a/vendor/go.etcd.io/etcd/client/v3/auth.go b/vendor/go.etcd.io/etcd/client/v3/auth.go
new file mode 100644
index 0000000000..a6f75d3215
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/auth.go
@@ -0,0 +1,236 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"go.etcd.io/etcd/api/v3/authpb"
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"google.golang.org/grpc"
+)
+
+type (
+	AuthEnableResponse               pb.AuthEnableResponse
+	AuthDisableResponse              pb.AuthDisableResponse
+	AuthStatusResponse               pb.AuthStatusResponse
+	AuthenticateResponse             pb.AuthenticateResponse
+	AuthUserAddResponse              pb.AuthUserAddResponse
+	AuthUserDeleteResponse           pb.AuthUserDeleteResponse
+	AuthUserChangePasswordResponse   pb.AuthUserChangePasswordResponse
+	AuthUserGrantRoleResponse        pb.AuthUserGrantRoleResponse
+	AuthUserGetResponse              pb.AuthUserGetResponse
+	AuthUserRevokeRoleResponse       pb.AuthUserRevokeRoleResponse
+	AuthRoleAddResponse              pb.AuthRoleAddResponse
+	AuthRoleGrantPermissionResponse  pb.AuthRoleGrantPermissionResponse
+	AuthRoleGetResponse              pb.AuthRoleGetResponse
+	AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
+	AuthRoleDeleteResponse           pb.AuthRoleDeleteResponse
+	AuthUserListResponse             pb.AuthUserListResponse
+	AuthRoleListResponse             pb.AuthRoleListResponse
+
+	PermissionType authpb.Permission_Type
+	Permission     authpb.Permission
+)
+
+const (
+	PermRead      = authpb.READ
+	PermWrite     = authpb.WRITE
+	PermReadWrite = authpb.READWRITE
+)
+
+type UserAddOptions authpb.UserAddOptions
+
+type Auth interface {
+	// Authenticate logs in and gets an authentication token.
+	Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error)
+
+	// AuthEnable enables auth of an etcd cluster.
+	AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
+
+	// AuthDisable disables auth of an etcd cluster.
+	AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
+
+	// AuthStatus returns the status of auth of an etcd cluster.
+	AuthStatus(ctx context.Context) (*AuthStatusResponse, error)
+
+	// UserAdd adds a new user to an etcd cluster.
+	UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
+
+	// UserAddWithOptions adds a new user to an etcd cluster with the given options.
+	UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error)
+
+	// UserDelete deletes a user from an etcd cluster.
+	UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
+
+	// UserChangePassword changes the password of a user.
+	UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
+
+	// UserGrantRole grants a role to a user.
+	UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
+
+	// UserGet gets detailed information about a user.
+	UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
+
+	// UserList gets a list of all users.
+	UserList(ctx context.Context) (*AuthUserListResponse, error)
+
+	// UserRevokeRole revokes a role from a user.
+	UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
+
+	// RoleAdd adds a new role to an etcd cluster.
+	RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
+
+	// RoleGrantPermission grants a permission to a role.
+	RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
+
+	// RoleGet gets detailed information about a role.
+ RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) + + // RoleList gets a list of all roles. + RoleList(ctx context.Context) (*AuthRoleListResponse, error) + + // RoleRevokePermission revokes a permission from a role. + RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) + + // RoleDelete deletes a role. + RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) +} + +type authClient struct { + remote pb.AuthClient + callOpts []grpc.CallOption +} + +func NewAuth(c *Client) Auth { + api := &authClient{remote: RetryAuthClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewAuthFromAuthClient(remote pb.AuthClient, c *Client) Auth { + api := &authClient{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (auth *authClient) Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { + resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) + return (*AuthenticateResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { + resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) + return (*AuthEnableResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { + resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) + return (*AuthDisableResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) AuthStatus(ctx context.Context) (*AuthStatusResponse, error) { + resp, err := auth.remote.AuthStatus(ctx, &pb.AuthStatusRequest{}, auth.callOpts...) + return (*AuthStatusResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...) + return (*AuthUserAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...) + return (*AuthUserAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { + resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) + return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { + resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) + return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { + resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) 
+ return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { + resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) + return (*AuthUserGetResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) { + resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) + return (*AuthUserListResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { + resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) + return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { + resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) + return (*AuthRoleAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { + perm := &authpb.Permission{ + Key: []byte(key), + RangeEnd: []byte(rangeEnd), + PermType: authpb.Permission_Type(permType), + } + resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) + return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { + resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) + return (*AuthRoleGetResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { + resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) + return (*AuthRoleListResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { + resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...) + return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { + resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) + return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) +} + +func StrToPermissionType(s string) (PermissionType, error) { + val, ok := authpb.Permission_Type_value[strings.ToUpper(s)] + if ok { + return PermissionType(val), nil + } + return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s) +} diff --git a/vendor/go.etcd.io/etcd/client/v3/client.go b/vendor/go.etcd.io/etcd/client/v3/client.go new file mode 100644 index 0000000000..efa44e8902 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/client.go @@ -0,0 +1,612 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+	"go.etcd.io/etcd/client/pkg/v3/logutil"
+	"go.etcd.io/etcd/client/v3/credentials"
+	"go.etcd.io/etcd/client/v3/internal/endpoint"
+	"go.etcd.io/etcd/client/v3/internal/resolver"
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	grpccredentials "google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/status"
+)
+
+var (
+	ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
+	ErrOldCluster           = errors.New("etcdclient: old cluster version")
+)
+
+// Client provides and manages an etcd v3 client session.
+type Client struct {
+	Cluster
+	KV
+	Lease
+	Watcher
+	Auth
+	Maintenance
+
+	conn *grpc.ClientConn
+
+	cfg      Config
+	creds    grpccredentials.TransportCredentials
+	resolver *resolver.EtcdManualResolver
+	mu       *sync.RWMutex
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	// Username is a user name for authentication.
+	Username string
+	// Password is a password for authentication.
+	Password        string
+	authTokenBundle credentials.Bundle
+
+	callOpts []grpc.CallOption
+
+	lgMu *sync.RWMutex
+	lg   *zap.Logger
+}
+
+// New creates a new etcdv3 client from a given configuration.
+func New(cfg Config) (*Client, error) {
+	if len(cfg.Endpoints) == 0 {
+		return nil, ErrNoAvailableEndpoints
+	}
+
+	return newClient(&cfg)
+}
+
+// NewCtxClient creates a client with a context but no underlying grpc
+// connection. This is useful for embedded cases that override the
+// service interface implementations and do not need connection management.
+func NewCtxClient(ctx context.Context, opts ...Option) *Client {
+	cctx, cancel := context.WithCancel(ctx)
+	c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex)}
+	for _, opt := range opts {
+		opt(c)
+	}
+	if c.lg == nil {
+		c.lg = zap.NewNop()
+	}
+	return c
+}
+
+// Option is a function type that can be passed as an argument to NewCtxClient to configure the client.
+type Option func(*Client)
+
+// NewFromURL creates a new etcdv3 client from a URL.
+func NewFromURL(url string) (*Client, error) {
+	return New(Config{Endpoints: []string{url}})
+}
+
+// NewFromURLs creates a new etcdv3 client from URLs.
+func NewFromURLs(urls []string) (*Client, error) {
+	return New(Config{Endpoints: urls})
+}
+
+// WithZapLogger is a NewCtxClient option that overrides the logger.
+func WithZapLogger(lg *zap.Logger) Option {
+	return func(c *Client) {
+		c.lg = lg
+	}
+}
+
+// WithLogger overrides the logger.
+//
+// Deprecated: Please use WithZapLogger or the Logger field in clientv3.Config.
+//
+// This does not change grpcLogger, which can be explicitly configured
+// using the grpc_zap.ReplaceGrpcLoggerV2(..) method.
+func (c *Client) WithLogger(lg *zap.Logger) *Client {
+	c.lgMu.Lock()
+	c.lg = lg
+	c.lgMu.Unlock()
+	return c
+}
+
+// GetLogger gets the logger.
+// NOTE: This method is for internal use of the etcd-client library and should not be used as a general-purpose logger.
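+// The logger is read while holding lgMu, so GetLogger is safe to call concurrently with WithLogger.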
+func (c *Client) GetLogger() *zap.Logger { + c.lgMu.RLock() + l := c.lg + c.lgMu.RUnlock() + return l +} + +// Close shuts down the client's etcd connections. +func (c *Client) Close() error { + c.cancel() + if c.Watcher != nil { + c.Watcher.Close() + } + if c.Lease != nil { + c.Lease.Close() + } + if c.conn != nil { + return toErr(c.ctx, c.conn.Close()) + } + return c.ctx.Err() +} + +// Ctx is a context for "out of band" messages (e.g., for sending +// "clean up" message when another context is canceled). It is +// canceled on client Close(). +func (c *Client) Ctx() context.Context { return c.ctx } + +// Endpoints lists the registered endpoints for the client. +func (c *Client) Endpoints() []string { + // copy the slice; protect original endpoints from being changed + c.mu.RLock() + defer c.mu.RUnlock() + eps := make([]string, len(c.cfg.Endpoints)) + copy(eps, c.cfg.Endpoints) + return eps +} + +// SetEndpoints updates client's endpoints. +func (c *Client) SetEndpoints(eps ...string) { + c.mu.Lock() + defer c.mu.Unlock() + c.cfg.Endpoints = eps + + c.resolver.SetEndpoints(eps) +} + +// Sync synchronizes client's endpoints with the known endpoints from the etcd membership. +func (c *Client) Sync(ctx context.Context) error { + mresp, err := c.MemberList(ctx) + if err != nil { + return err + } + var eps []string + for _, m := range mresp.Members { + if len(m.Name) != 0 && !m.IsLearner { + eps = append(eps, m.ClientURLs...) + } + } + c.SetEndpoints(eps...) + return nil +} + +func (c *Client) autoSync() { + if c.cfg.AutoSyncInterval == time.Duration(0) { + return + } + + for { + select { + case <-c.ctx.Done(): + return + case <-time.After(c.cfg.AutoSyncInterval): + ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) + err := c.Sync(ctx) + cancel() + if err != nil && err != c.ctx.Err() { + c.lg.Info("Auto sync endpoints failed.", zap.Error(err)) + } + } + } +} + +// dialSetupOpts gives the dial opts prior to any authentication. +func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) { + if c.cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: c.cfg.DialKeepAliveTime, + Timeout: c.cfg.DialKeepAliveTimeout, + PermitWithoutStream: c.cfg.PermitWithoutStream, + } + opts = append(opts, grpc.WithKeepaliveParams(params)) + } + opts = append(opts, dopts...) + + if creds != nil { + opts = append(opts, grpc.WithTransportCredentials(creds)) + } else { + opts = append(opts, grpc.WithInsecure()) + } + + // Interceptor retry and backoff. + // TODO: Replace all of clientv3/retry.go with RetryPolicy: + // https://github.com/grpc/grpc-proto/blob/cdd9ed5c3d3f87aef62f373b93361cf7bddc620d/grpc/service_config/service_config.proto#L130 + rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction)) + opts = append(opts, + // Disable stream retry by default since go-grpc-middleware/retry does not support client streams. + // Streams that are safe to retry are enabled individually. + grpc.WithStreamInterceptor(c.streamClientInterceptor(withMax(0), rrBackoff)), + grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(defaultUnaryMaxRetries), rrBackoff)), + ) + + return opts, nil +} + +// Dial connects to a single endpoint using the client's config. +func (c *Client) Dial(ep string) (*grpc.ClientConn, error) { + creds := c.credentialsForEndpoint(ep) + + // Using ad-hoc created resolver, to guarantee only explicitly given + // endpoint is used. 
+	return c.dial(creds, grpc.WithResolvers(resolver.New(ep)))
+}
+
+func (c *Client) getToken(ctx context.Context) error {
+	var err error // return the last error in case of failure
+
+	if c.Username == "" || c.Password == "" {
+		return nil
+	}
+
+	resp, err := c.Auth.Authenticate(ctx, c.Username, c.Password)
+	if err != nil {
+		if err == rpctypes.ErrAuthNotEnabled {
+			c.authTokenBundle.UpdateAuthToken("")
+			return nil
+		}
+		return err
+	}
+	c.authTokenBundle.UpdateAuthToken(resp.Token)
+	return nil
+}
+
+// dialWithBalancer dials the client's current load-balanced resolver group. The scheme of the
+// provided endpoint's host determines the scheme used for all endpoints of the client connection.
+func (c *Client) dialWithBalancer(dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	creds := c.credentialsForEndpoint(c.Endpoints()[0])
+	opts := append(dopts, grpc.WithResolvers(c.resolver))
+	return c.dial(creds, opts...)
+}
+
+// dial configures and dials any grpc balancer target.
+func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	opts, err := c.dialSetupOpts(creds, dopts...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to configure dialer: %v", err)
+	}
+	if c.authTokenBundle != nil {
+		opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
+	}
+
+	opts = append(opts, c.cfg.DialOptions...)
+
+	dctx := c.ctx
+	if c.cfg.DialTimeout > 0 {
+		var cancel context.CancelFunc
+		dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+		defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
+	}
+	target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.Endpoints()[0]))
+	conn, err := grpc.DialContext(dctx, target, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+func authority(endpoint string) string {
+	spl := strings.SplitN(endpoint, "://", 2)
+	if len(spl) < 2 {
+		if strings.HasPrefix(endpoint, "unix:") {
+			return endpoint[len("unix:"):]
+		}
+		if strings.HasPrefix(endpoint, "unixs:") {
+			return endpoint[len("unixs:"):]
+		}
+		return endpoint
+	}
+	return spl[1]
+}
+
+func (c *Client) credentialsForEndpoint(ep string) grpccredentials.TransportCredentials {
+	r := endpoint.RequiresCredentials(ep)
+	switch r {
+	case endpoint.CREDS_DROP:
+		return nil
+	case endpoint.CREDS_OPTIONAL:
+		return c.creds
+	case endpoint.CREDS_REQUIRE:
+		if c.creds != nil {
+			return c.creds
+		}
+		return credentials.NewBundle(credentials.Config{}).TransportCredentials()
+	default:
+		panic(fmt.Errorf("unsupported CredsRequirement: %v", r))
+	}
+}
+
+func newClient(cfg *Config) (*Client, error) {
+	if cfg == nil {
+		cfg = &Config{}
+	}
+	var creds grpccredentials.TransportCredentials
+	if cfg.TLS != nil {
+		creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials()
+	}
+
+	// use a temporary skeleton client to bootstrap first connection
+	baseCtx := context.TODO()
+	if cfg.Context != nil {
+		baseCtx = cfg.Context
+	}
+
+	ctx, cancel := context.WithCancel(baseCtx)
+	client := &Client{
+		conn:     nil,
+		cfg:      *cfg,
+		creds:    creds,
+		ctx:      ctx,
+		cancel:   cancel,
+		mu:       new(sync.RWMutex),
+		callOpts: defaultCallOpts,
+		lgMu:     new(sync.RWMutex),
+	}
+
+	var err error
+	if cfg.Logger != nil {
+		client.lg = cfg.Logger
+	} else if cfg.LogConfig != nil {
+		client.lg, err = cfg.LogConfig.Build()
+	} else {
+		client.lg, err = logutil.CreateDefaultZapLogger(etcdClientDebugLevel())
+		if client.lg != nil {
+			client.lg = client.lg.Named("etcd-client")
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	if cfg.Username != "" && cfg.Password != "" {
+		client.Username = cfg.Username
+		client.Password = cfg.Password
+		client.authTokenBundle = credentials.NewBundle(credentials.Config{})
+	}
+	if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
+		if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
+			return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
+		}
+		callOpts := []grpc.CallOption{
+			defaultWaitForReady,
+			defaultMaxCallSendMsgSize,
+			defaultMaxCallRecvMsgSize,
+		}
+		if cfg.MaxCallSendMsgSize > 0 {
+			callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
+		}
+		if cfg.MaxCallRecvMsgSize > 0 {
+			callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
+		}
+		client.callOpts = callOpts
+	}
+
+	client.resolver = resolver.New(cfg.Endpoints...)
+
+	if len(cfg.Endpoints) < 1 {
+		client.cancel()
+		return nil, fmt.Errorf("at least one Endpoint is required in client config")
+	}
+	// Use the provided endpoint target so that, for https:// endpoints without any TLS config given,
+	// grpc will assume the certificate server name is the endpoint host.
+	conn, err := client.dialWithBalancer()
+	if err != nil {
+		client.cancel()
+		client.resolver.Close()
+		// TODO: Error like `fmt.Errorf(dialing [%s] failed: %v, strings.Join(cfg.Endpoints, ";"), err)` would help with debugging a lot.
+ return nil, err + } + client.conn = conn + + client.Cluster = NewCluster(client) + client.KV = NewKV(client) + client.Lease = NewLease(client) + client.Watcher = NewWatcher(client) + client.Auth = NewAuth(client) + client.Maintenance = NewMaintenance(client) + + //get token with established connection + ctx, cancel = client.ctx, func() {} + if client.cfg.DialTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, client.cfg.DialTimeout) + } + err = client.getToken(ctx) + if err != nil { + client.Close() + cancel() + //TODO: Consider fmt.Errorf("communicating with [%s] failed: %v", strings.Join(cfg.Endpoints, ";"), err) + return nil, err + } + cancel() + + if cfg.RejectOldCluster { + if err := client.checkVersion(); err != nil { + client.Close() + return nil, err + } + } + + go client.autoSync() + return client, nil +} + +// roundRobinQuorumBackoff retries against quorum between each backoff. +// This is intended for use with a round robin load balancer. +func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc { + return func(attempt uint) time.Duration { + // after each round robin across quorum, backoff for our wait between duration + n := uint(len(c.Endpoints())) + quorum := (n/2 + 1) + if attempt%quorum == 0 { + c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction)) + return jitterUp(waitBetween, jitterFraction) + } + c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum)) + return 0 + } +} + +func (c *Client) checkVersion() (err error) { + var wg sync.WaitGroup + + eps := c.Endpoints() + errc := make(chan error, len(eps)) + ctx, cancel := context.WithCancel(c.ctx) + if c.cfg.DialTimeout > 0 { + cancel() + ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) + } + + wg.Add(len(eps)) + for _, ep := range eps { + // if cluster is current, any endpoint gives a recent version + go func(e string) { + defer wg.Done() + resp, rerr := c.Status(ctx, e) + if rerr != nil { + errc <- rerr + return + } + vs := strings.Split(resp.Version, ".") + maj, min := 0, 0 + if len(vs) >= 2 { + var serr error + if maj, serr = strconv.Atoi(vs[0]); serr != nil { + errc <- serr + return + } + if min, serr = strconv.Atoi(vs[1]); serr != nil { + errc <- serr + return + } + } + if maj < 3 || (maj == 3 && min < 4) { + rerr = ErrOldCluster + } + errc <- rerr + }(ep) + } + // wait for success + for range eps { + if err = <-errc; err != nil { + break + } + } + cancel() + wg.Wait() + return err +} + +// ActiveConnection returns the current in-use connection +func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } + +// isHaltErr returns true if the given error and context indicate no forward +// progress can be made, even after reconnecting. +func isHaltErr(ctx context.Context, err error) bool { + if ctx != nil && ctx.Err() != nil { + return true + } + if err == nil { + return false + } + ev, _ := status.FromError(err) + // Unavailable codes mean the system will be right back. + // (e.g., can't connect, lost leader) + // Treat Internal codes as if something failed, leaving the + // system in an inconsistent state, but retrying could make progress. + // (e.g., failed in middle of send, corrupted frame) + // TODO: are permanent Internal errors possible from grpc? 
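+	// Any other code is assumed to indicate an unrecoverable failure, so halt.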
+ return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal +} + +// isUnavailableErr returns true if the given error is an unavailable error +func isUnavailableErr(ctx context.Context, err error) bool { + if ctx != nil && ctx.Err() != nil { + return false + } + if err == nil { + return false + } + ev, ok := status.FromError(err) + if ok { + // Unavailable codes mean the system will be right back. + // (e.g., can't connect, lost leader) + return ev.Code() == codes.Unavailable + } + return false +} + +func toErr(ctx context.Context, err error) error { + if err == nil { + return nil + } + err = rpctypes.Error(err) + if _, ok := err.(rpctypes.EtcdError); ok { + return err + } + if ev, ok := status.FromError(err); ok { + code := ev.Code() + switch code { + case codes.DeadlineExceeded: + fallthrough + case codes.Canceled: + if ctx.Err() != nil { + err = ctx.Err() + } + } + } + return err +} + +func canceledByCaller(stopCtx context.Context, err error) bool { + if stopCtx.Err() == nil || err == nil { + return false + } + + return err == context.Canceled || err == context.DeadlineExceeded +} + +// IsConnCanceled returns true, if error is from a closed gRPC connection. +// ref. https://github.com/grpc/grpc-go/pull/1854 +func IsConnCanceled(err error) bool { + if err == nil { + return false + } + + // >= gRPC v1.23.x + s, ok := status.FromError(err) + if ok { + // connection is canceled or server has already closed the connection + return s.Code() == codes.Canceled || s.Message() == "transport is closing" + } + + // >= gRPC v1.10.x + if err == context.Canceled { + return true + } + + // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")' + return strings.Contains(err.Error(), "grpc: the client connection is closing") +} diff --git a/vendor/go.etcd.io/etcd/client/v3/cluster.go b/vendor/go.etcd.io/etcd/client/v3/cluster.go new file mode 100644 index 0000000000..92d7cdb56b --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/cluster.go @@ -0,0 +1,141 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/client/pkg/v3/types" + + "google.golang.org/grpc" +) + +type ( + Member pb.Member + MemberListResponse pb.MemberListResponse + MemberAddResponse pb.MemberAddResponse + MemberRemoveResponse pb.MemberRemoveResponse + MemberUpdateResponse pb.MemberUpdateResponse + MemberPromoteResponse pb.MemberPromoteResponse +) + +type Cluster interface { + // MemberList lists the current cluster membership. + MemberList(ctx context.Context) (*MemberListResponse, error) + + // MemberAdd adds a new member into the cluster. + MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + + // MemberAddAsLearner adds a new learner member into the cluster. + MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + + // MemberRemove removes an existing member from the cluster. 
+ MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) + + // MemberUpdate updates the peer addresses of the member. + MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) + + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) +} + +type cluster struct { + remote pb.ClusterClient + callOpts []grpc.CallOption +} + +func NewCluster(c *Client) Cluster { + api := &cluster{remote: RetryClusterClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { + api := &cluster{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, false) +} + +func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, true) +} + +func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + + r := &pb.MemberAddRequest{ + PeerURLs: peerAddrs, + IsLearner: isLearner, + } + resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberAddResponse)(resp), nil +} + +func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { + r := &pb.MemberRemoveRequest{ID: id} + resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberRemoveResponse)(resp), nil +} + +func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + + // it is safe to retry on update. + r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} + resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...) + if err == nil { + return (*MemberUpdateResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { + // it is safe to retry on list. + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{Linearizable: true}, c.callOpts...) + if err == nil { + return (*MemberListResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) { + r := &pb.MemberPromoteRequest{ID: id} + resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberPromoteResponse)(resp), nil +} diff --git a/vendor/go.etcd.io/etcd/client/v3/compact_op.go b/vendor/go.etcd.io/etcd/client/v3/compact_op.go new file mode 100644 index 0000000000..a6e660aa82 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/compact_op.go @@ -0,0 +1,51 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+// CompactOp represents a compact operation.
+type CompactOp struct {
+	revision int64
+	physical bool
+}
+
+// CompactOption configures a compact operation.
+type CompactOption func(*CompactOp)
+
+func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// OpCompact wraps a slice of CompactOptions to create a CompactOp.
+func OpCompact(rev int64, opts ...CompactOption) CompactOp {
+	ret := CompactOp{revision: rev}
+	ret.applyCompactOpts(opts)
+	return ret
+}
+
+func (op CompactOp) toRequest() *pb.CompactionRequest {
+	return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
+}
+
+// WithCompactPhysical makes Compact wait until all compacted entries are
+// removed from the etcd server's storage.
+func WithCompactPhysical() CompactOption {
+	return func(op *CompactOp) { op.physical = true }
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/compare.go b/vendor/go.etcd.io/etcd/client/v3/compare.go
new file mode 100644
index 0000000000..e2967cf38e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/compare.go
@@ -0,0 +1,140 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+type CompareTarget int
+type CompareResult int
+
+const (
+	CompareVersion CompareTarget = iota
+	CompareCreated
+	CompareModified
+	CompareValue
+)
+
+type Cmp pb.Compare
+
+func Compare(cmp Cmp, result string, v interface{}) Cmp {
+	var r pb.Compare_CompareResult
+
+	switch result {
+	case "=":
+		r = pb.Compare_EQUAL
+	case "!=":
+		r = pb.Compare_NOT_EQUAL
+	case ">":
+		r = pb.Compare_GREATER
+	case "<":
+		r = pb.Compare_LESS
+	default:
+		panic("Unknown result op")
+	}
+
+	cmp.Result = r
+	switch cmp.Target {
+	case pb.Compare_VALUE:
+		val, ok := v.(string)
+		if !ok {
+			panic("bad compare value")
+		}
+		cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
+	case pb.Compare_VERSION:
+		cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
+	case pb.Compare_CREATE:
+		cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
+	case pb.Compare_MOD:
+		cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
+	case pb.Compare_LEASE:
+		cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
+	default:
+		panic("Unknown compare type")
+	}
+	return cmp
+}
+
+func Value(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
+}
+
+func Version(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
+}
+
+func CreateRevision(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
+}
+
+func ModRevision(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
+}
+
+// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
+// LeaseID is 0, otherwise known as `NoLease`.
+func LeaseValue(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
+}
+
+// KeyBytes returns the byte slice holding the comparison key.
+func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
+
+// WithKeyBytes sets the byte slice for the comparison key.
+func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
+
+// ValueBytes returns the byte slice holding the comparison value, if any.
+func (cmp *Cmp) ValueBytes() []byte {
+	if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
+		return tu.Value
+	}
+	return nil
+}
+
+// WithValueBytes sets the byte slice for the comparison's value.
+func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
+
+// WithRange sets the comparison to scan the range [key, end).
+func (cmp Cmp) WithRange(end string) Cmp {
+	cmp.RangeEnd = []byte(end)
+	return cmp
+}
+
+// WithPrefix sets the comparison to scan all keys prefixed by the key.
+func (cmp Cmp) WithPrefix() Cmp {
+	cmp.RangeEnd = getPrefix(cmp.Key)
+	return cmp
+}
+
+// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
+func mustInt64(val interface{}) int64 {
+	if v, ok := val.(int64); ok {
+		return v
+	}
+	if v, ok := val.(int); ok {
+		return int64(v)
+	}
+	panic("bad value")
+}
+
+// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
+// int64 otherwise.
+func mustInt64orLeaseID(val interface{}) int64 {
+	if v, ok := val.(LeaseID); ok {
+		return int64(v)
+	}
+	return mustInt64(val)
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/doc.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/doc.go
new file mode 100644
index 0000000000..dcdbf511d1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package concurrency implements concurrency operations on top of
+// etcd such as distributed locks, barriers, and elections.
+package concurrency
diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/election.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/election.go
new file mode 100644
index 0000000000..31e93d2428
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/election.go
@@ -0,0 +1,254 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/api/v3/mvccpb"
+	v3 "go.etcd.io/etcd/client/v3"
+)
+
+var (
+	ErrElectionNotLeader = errors.New("election: not leader")
+	ErrElectionNoLeader  = errors.New("election: no leader")
+)
+
+type Election struct {
+	session *Session
+
+	keyPrefix string
+
+	leaderKey     string
+	leaderRev     int64
+	leaderSession *Session
+	hdr           *pb.ResponseHeader
+}
+
+// NewElection returns a new election on a given key prefix.
+func NewElection(s *Session, pfx string) *Election {
+	return &Election{session: s, keyPrefix: pfx + "/"}
+}
+
+// ResumeElection initializes an election with a known leader.
+func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
+	return &Election{
+		keyPrefix:     pfx,
+		session:       s,
+		leaderKey:     leaderKey,
+		leaderRev:     leaderRev,
+		leaderSession: s,
+	}
+}
+
+// Campaign puts a value as eligible for the election on the prefix
+// key.
+// Multiple sessions can participate in the election for the
+// same prefix, but only one can be the leader at a time.
+//
+// If the context is 'context.TODO()/context.Background()', Campaign
+// will block waiting for other keys to be deleted, unless the server
+// returns a non-recoverable error (e.g. ErrCompacted).
+// Otherwise, Campaign blocks until it becomes the leader or the
+// context is canceled or times out.
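+//
+// A minimal usage sketch (hypothetical prefix and value; assumes an existing
+// client `cli`; error handling elided):
+//
+//	s, _ := concurrency.NewSession(cli)
+//	e := concurrency.NewElection(s, "/my-election")
+//	if err := e.Campaign(context.Background(), "candidate-1"); err != nil {
+//		// handle error
+//	}
+//	// ... act as leader, then step down:
+//	_ = e.Resign(context.Background())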
+func (e *Election) Campaign(ctx context.Context, val string) error { + s := e.session + client := e.session.Client() + + k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease()) + txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0)) + txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease()))) + txn = txn.Else(v3.OpGet(k)) + resp, err := txn.Commit() + if err != nil { + return err + } + e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s + if !resp.Succeeded { + kv := resp.Responses[0].GetResponseRange().Kvs[0] + e.leaderRev = kv.CreateRevision + if string(kv.Value) != val { + if err = e.Proclaim(ctx, val); err != nil { + e.Resign(ctx) + return err + } + } + } + + _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1) + if err != nil { + // clean up in case of context cancel + select { + case <-ctx.Done(): + e.Resign(client.Ctx()) + default: + e.leaderSession = nil + } + return err + } + e.hdr = resp.Header + + return nil +} + +// Proclaim lets the leader announce a new value without another election. +func (e *Election) Proclaim(ctx context.Context, val string) error { + if e.leaderSession == nil { + return ErrElectionNotLeader + } + client := e.session.Client() + cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) + txn := client.Txn(ctx).If(cmp) + txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease()))) + tresp, terr := txn.Commit() + if terr != nil { + return terr + } + if !tresp.Succeeded { + e.leaderKey = "" + return ErrElectionNotLeader + } + + e.hdr = tresp.Header + return nil +} + +// Resign lets a leader start a new election. +func (e *Election) Resign(ctx context.Context) (err error) { + if e.leaderSession == nil { + return nil + } + client := e.session.Client() + cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) + resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit() + if err == nil { + e.hdr = resp.Header + } + e.leaderKey = "" + e.leaderSession = nil + return err +} + +// Leader returns the leader value for the current election. +func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) { + client := e.session.Client() + resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) + if err != nil { + return nil, err + } else if len(resp.Kvs) == 0 { + // no leader currently elected + return nil, ErrElectionNoLeader + } + return resp, nil +} + +// Observe returns a channel that reliably observes ordered leader proposals +// as GetResponse values on every current elected leader key. It will not +// necessarily fetch all historical leader updates, but will always post the +// most recent leader value. +// +// The channel closes when the context is canceled or the underlying watcher +// is otherwise disrupted. +func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse { + retc := make(chan v3.GetResponse) + go e.observe(ctx, retc) + return retc +} + +func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { + client := e.session.Client() + + defer close(ch) + for { + resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) + if err != nil { + return + } + + var kv *mvccpb.KeyValue + var hdr *pb.ResponseHeader + + if len(resp.Kvs) == 0 { + cctx, cancel := context.WithCancel(ctx) + // wait for first key put on prefix + opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()} + wch := client.Watch(cctx, e.keyPrefix, opts...) 
+			for kv == nil {
+				wr, ok := <-wch
+				if !ok || wr.Err() != nil {
+					cancel()
+					return
+				}
+				// only accept puts; a delete will make observe() spin
+				for _, ev := range wr.Events {
+					if ev.Type == mvccpb.PUT {
+						hdr, kv = &wr.Header, ev.Kv
+						// may have multiple revs; hdr.rev = the last rev
+						// set to kv's rev in case batch has multiple Puts
+						hdr.Revision = kv.ModRevision
+						break
+					}
+				}
+			}
+			cancel()
+		} else {
+			hdr, kv = resp.Header, resp.Kvs[0]
+		}
+
+		select {
+		case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
+		case <-ctx.Done():
+			return
+		}
+
+		cctx, cancel := context.WithCancel(ctx)
+		wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
+		keyDeleted := false
+		for !keyDeleted {
+			wr, ok := <-wch
+			if !ok {
+				cancel()
+				return
+			}
+			for _, ev := range wr.Events {
+				if ev.Type == mvccpb.DELETE {
+					keyDeleted = true
+					break
+				}
+				resp.Header = &wr.Header
+				resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
+				select {
+				case ch <- *resp:
+				case <-cctx.Done():
+					cancel()
+					return
+				}
+			}
+		}
+		cancel()
+	}
+}
+
+// Key returns the leader key if elected, empty string otherwise.
+func (e *Election) Key() string { return e.leaderKey }
+
+// Rev returns the leader key's creation revision, if elected.
+func (e *Election) Rev() int64 { return e.leaderRev }
+
+// Header is the response header from the last successful election proposal.
+func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/key.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/key.go
new file mode 100644
index 0000000000..20825950f3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/key.go
@@ -0,0 +1,65 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"fmt"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/api/v3/mvccpb"
+	v3 "go.etcd.io/etcd/client/v3"
+)
+
+func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
+	cctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	var wr v3.WatchResponse
+	wch := client.Watch(cctx, key, v3.WithRev(rev))
+	for wr = range wch {
+		for _, ev := range wr.Events {
+			if ev.Type == mvccpb.DELETE {
+				return nil
+			}
+		}
+	}
+	if err := wr.Err(); err != nil {
+		return err
+	}
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+	return fmt.Errorf("lost watcher waiting for delete")
+}
+
+// waitDeletes efficiently waits until all keys matching the prefix and with a
+// create revision no greater than maxCreateRev have been deleted.
+func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
+	getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
+	for {
+		resp, err := client.Get(ctx, pfx, getOpts...)
+		if err != nil {
+			return nil, err
+		}
+		if len(resp.Kvs) == 0 {
+			return resp.Header, nil
+		}
+		lastKey := string(resp.Kvs[0].Key)
+		if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
+			return nil, err
+		}
+	}
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/mutex.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/mutex.go
new file mode 100644
index 0000000000..c3800d6282
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/mutex.go
@@ -0,0 +1,167 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	v3 "go.etcd.io/etcd/client/v3"
+)
+
+// ErrLocked is returned by TryLock when Mutex is already locked by another session.
+var ErrLocked = errors.New("mutex: Locked by another session")
+var ErrSessionExpired = errors.New("mutex: session is expired")
+
+// Mutex implements the sync Locker interface with etcd.
+type Mutex struct {
+	s *Session
+
+	pfx   string
+	myKey string
+	myRev int64
+	hdr   *pb.ResponseHeader
+}
+
+func NewMutex(s *Session, pfx string) *Mutex {
+	return &Mutex{s, pfx + "/", "", -1, nil}
+}
+
+// TryLock locks the mutex if it is not already locked by another session.
+// If the lock is held by another session, TryLock returns immediately after
+// attempting any necessary cleanup.
+// The ctx argument is used for the sending/receiving Txn RPC.
+func (m *Mutex) TryLock(ctx context.Context) error {
+	resp, err := m.tryAcquire(ctx)
+	if err != nil {
+		return err
+	}
+	// if no other key is on the prefix, or our key has the minimum create revision, we already hold the lock
+	ownerKey := resp.Responses[1].GetResponseRange().Kvs
+	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+		m.hdr = resp.Header
+		return nil
+	}
+	client := m.s.Client()
+	// Cannot lock, so delete the key
+	if _, err := client.Delete(ctx, m.myKey); err != nil {
+		return err
+	}
+	m.myKey = "\x00"
+	m.myRev = -1
+	return ErrLocked
+}
+
+// Lock locks the mutex with a cancelable context. If the context is canceled
+// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
+func (m *Mutex) Lock(ctx context.Context) error {
+	resp, err := m.tryAcquire(ctx)
+	if err != nil {
+		return err
+	}
+	// if no other key is on the prefix, or our key has the minimum create revision, we already hold the lock
+	ownerKey := resp.Responses[1].GetResponseRange().Kvs
+	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+		m.hdr = resp.Header
+		return nil
+	}
+	client := m.s.Client()
+	// wait for deletion of keys with revisions prior to myKey's
+	// TODO: early termination if the session key is deleted before other session keys with smaller revisions.
+	_, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
+	// release lock key if wait failed
+	if werr != nil {
+		m.Unlock(client.Ctx())
+		return werr
+	}
+
+	// make sure the session is not expired, and the owner key still exists.
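+	// While waiting, this session's lease may have expired and m.myKey may
+	// have been deleted with it, so re-check the key before claiming the lock.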
+ gresp, werr := client.Get(ctx, m.myKey) + if werr != nil { + m.Unlock(client.Ctx()) + return werr + } + + if len(gresp.Kvs) == 0 { // is the session key lost? + return ErrSessionExpired + } + m.hdr = gresp.Header + + return nil +} + +func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) { + s := m.s + client := m.s.Client() + + m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) + cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0) + // put self in lock waiters via myKey; oldest waiter holds lock + put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) + // reuse key in case this session already holds the lock + get := v3.OpGet(m.myKey) + // fetch current holder to complete uncontended path with only one RPC + getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) + resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() + if err != nil { + return nil, err + } + m.myRev = resp.Header.Revision + if !resp.Succeeded { + m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision + } + return resp, nil +} + +func (m *Mutex) Unlock(ctx context.Context) error { + client := m.s.Client() + if _, err := client.Delete(ctx, m.myKey); err != nil { + return err + } + m.myKey = "\x00" + m.myRev = -1 + return nil +} + +func (m *Mutex) IsOwner() v3.Cmp { + return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev) +} + +func (m *Mutex) Key() string { return m.myKey } + +// Header is the response header received from etcd on acquiring the lock. +func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr } + +type lockerMutex struct{ *Mutex } + +func (lm *lockerMutex) Lock() { + client := lm.s.Client() + if err := lm.Mutex.Lock(client.Ctx()); err != nil { + panic(err) + } +} +func (lm *lockerMutex) Unlock() { + client := lm.s.Client() + if err := lm.Mutex.Unlock(client.Ctx()); err != nil { + panic(err) + } +} + +// NewLocker creates a sync.Locker backed by an etcd mutex. +func NewLocker(s *Session, pfx string) sync.Locker { + return &lockerMutex{NewMutex(s, pfx)} +} diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/session.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/session.go new file mode 100644 index 0000000000..7143cc4747 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/session.go @@ -0,0 +1,141 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "context" + "time" + + v3 "go.etcd.io/etcd/client/v3" +) + +const defaultSessionTTL = 60 + +// Session represents a lease kept alive for the lifetime of a client. +// Fault-tolerant applications may use sessions to reason about liveness. +type Session struct { + client *v3.Client + opts *sessionOptions + id v3.LeaseID + + cancel context.CancelFunc + donec <-chan struct{} +} + +// NewSession gets the leased session for a client. 
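+//
+// Illustrative usage (an editor's sketch, not part of the upstream file;
+// assumes an existing *v3.Client named cli and a reachable cluster):
+//
+// s, err := concurrency.NewSession(cli, concurrency.WithTTL(30))
+// if err != nil {
+// // handle error
+// }
+// defer s.Close()
+// m := concurrency.NewMutex(s, "/locks/demo")
+// if err := m.Lock(context.TODO()); err != nil {
+// // handle error
+// }
+// defer m.Unlock(context.TODO())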
+func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { + ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()} + for _, opt := range opts { + opt(ops) + } + + id := ops.leaseID + if id == v3.NoLease { + resp, err := client.Grant(ops.ctx, int64(ops.ttl)) + if err != nil { + return nil, err + } + id = resp.ID + } + + ctx, cancel := context.WithCancel(ops.ctx) + keepAlive, err := client.KeepAlive(ctx, id) + if err != nil || keepAlive == nil { + cancel() + return nil, err + } + + donec := make(chan struct{}) + s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec} + + // keep the lease alive until client error or cancelled context + go func() { + defer close(donec) + for range keepAlive { + // eat messages until keep alive channel closes + } + }() + + return s, nil +} + +// Client is the etcd client that is attached to the session. +func (s *Session) Client() *v3.Client { + return s.client +} + +// Lease is the lease ID for keys bound to the session. +func (s *Session) Lease() v3.LeaseID { return s.id } + +// Done returns a channel that closes when the lease is orphaned, expires, or +// is otherwise no longer being refreshed. +func (s *Session) Done() <-chan struct{} { return s.donec } + +// Orphan ends the refresh for the session lease. This is useful +// in case the state of the client connection is indeterminate (revoke +// would fail) or when transferring lease ownership. +func (s *Session) Orphan() { + s.cancel() + <-s.donec +} + +// Close orphans the session and revokes the session lease. +func (s *Session) Close() error { + s.Orphan() + // if revoke takes longer than the ttl, lease is expired anyway + ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second) + _, err := s.client.Revoke(ctx, s.id) + cancel() + return err +} + +type sessionOptions struct { + ttl int + leaseID v3.LeaseID + ctx context.Context +} + +// SessionOption configures Session. +type SessionOption func(*sessionOptions) + +// WithTTL configures the session's TTL in seconds. +// If TTL is <= 0, the default 60 seconds TTL will be used. +func WithTTL(ttl int) SessionOption { + return func(so *sessionOptions) { + if ttl > 0 { + so.ttl = ttl + } + } +} + +// WithLease specifies the existing leaseID to be used for the session. +// This is useful in process restart scenario, for example, to reclaim +// leadership from an election prior to restart. +func WithLease(leaseID v3.LeaseID) SessionOption { + return func(so *sessionOptions) { + so.leaseID = leaseID + } +} + +// WithContext assigns a context to the session instead of defaulting to +// using the client context. This is useful for canceling NewSession and +// Close operations immediately without having to close the client. If the +// context is canceled before Close() completes, the session's lease will be +// abandoned and left to expire instead of being revoked. +func WithContext(ctx context.Context) SessionOption { + return func(so *sessionOptions) { + so.ctx = ctx + } +} diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/stm.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/stm.go new file mode 100644 index 0000000000..ba7303d097 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/stm.go @@ -0,0 +1,387 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "context" + "math" + + v3 "go.etcd.io/etcd/client/v3" +) + +// STM is an interface for software transactional memory. +type STM interface { + // Get returns the value for a key and inserts the key in the txn's read set. + // If Get fails, it aborts the transaction with an error, never returning. + Get(key ...string) string + // Put adds a value for a key to the write set. + Put(key, val string, opts ...v3.OpOption) + // Rev returns the revision of a key in the read set. + Rev(key string) int64 + // Del deletes a key. + Del(key string) + + // commit attempts to apply the txn's changes to the server. + commit() *v3.TxnResponse + reset() +} + +// Isolation is an enumeration of transactional isolation levels which +// describes how transactions should interfere and conflict. +type Isolation int + +const ( + // SerializableSnapshot provides serializable isolation and also checks + // for write conflicts. + SerializableSnapshot Isolation = iota + // Serializable reads within the same transaction attempt return data + // from the at the revision of the first read. + Serializable + // RepeatableReads reads within the same transaction attempt always + // return the same data. + RepeatableReads + // ReadCommitted reads keys from any committed revision. + ReadCommitted +) + +// stmError safely passes STM errors through panic to the STM error channel. +type stmError struct{ err error } + +type stmOptions struct { + iso Isolation + ctx context.Context + prefetch []string +} + +type stmOption func(*stmOptions) + +// WithIsolation specifies the transaction isolation level. +func WithIsolation(lvl Isolation) stmOption { + return func(so *stmOptions) { so.iso = lvl } +} + +// WithAbortContext specifies the context for permanently aborting the transaction. +func WithAbortContext(ctx context.Context) stmOption { + return func(so *stmOptions) { so.ctx = ctx } +} + +// WithPrefetch is a hint to prefetch a list of keys before trying to apply. +// If an STM transaction will unconditionally fetch a set of keys, prefetching +// those keys will save the round-trip cost from requesting each key one by one +// with Get(). +func WithPrefetch(keys ...string) stmOption { + return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) } +} + +// NewSTM initiates a new STM instance, using serializable snapshot isolation by default. +func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) { + opts := &stmOptions{ctx: c.Ctx()} + for _, f := range so { + f(opts) + } + if len(opts.prefetch) != 0 { + f := apply + apply = func(s STM) error { + s.Get(opts.prefetch...) + return f(s) + } + } + return runSTM(mkSTM(c, opts), apply) +} + +func mkSTM(c *v3.Client, opts *stmOptions) STM { + switch opts.iso { + case SerializableSnapshot: + s := &stmSerializable{ + stm: stm{client: c, ctx: opts.ctx}, + prefetch: make(map[string]*v3.GetResponse), + } + s.conflicts = func() []v3.Cmp { + return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...) 
+ } + return s + case Serializable: + s := &stmSerializable{ + stm: stm{client: c, ctx: opts.ctx}, + prefetch: make(map[string]*v3.GetResponse), + } + s.conflicts = func() []v3.Cmp { return s.rset.cmps() } + return s + case RepeatableReads: + s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} + s.conflicts = func() []v3.Cmp { return s.rset.cmps() } + return s + case ReadCommitted: + s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} + s.conflicts = func() []v3.Cmp { return nil } + return s + default: + panic("unsupported stm") + } +} + +type stmResponse struct { + resp *v3.TxnResponse + err error +} + +func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) { + outc := make(chan stmResponse, 1) + go func() { + defer func() { + if r := recover(); r != nil { + e, ok := r.(stmError) + if !ok { + // client apply panicked + panic(r) + } + outc <- stmResponse{nil, e.err} + } + }() + var out stmResponse + for { + s.reset() + if out.err = apply(s); out.err != nil { + break + } + if out.resp = s.commit(); out.resp != nil { + break + } + } + outc <- out + }() + r := <-outc + return r.resp, r.err +} + +// stm implements repeatable-read software transactional memory over etcd +type stm struct { + client *v3.Client + ctx context.Context + // rset holds read key values and revisions + rset readSet + // wset holds overwritten keys and their values + wset writeSet + // getOpts are the opts used for gets + getOpts []v3.OpOption + // conflicts computes the current conflicts on the txn + conflicts func() []v3.Cmp +} + +type stmPut struct { + val string + op v3.Op +} + +type readSet map[string]*v3.GetResponse + +func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) { + for i, resp := range txnresp.Responses { + rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange()) + } +} + +// first returns the store revision from the first fetch +func (rs readSet) first() int64 { + ret := int64(math.MaxInt64 - 1) + for _, resp := range rs { + if rev := resp.Header.Revision; rev < ret { + ret = rev + } + } + return ret +} + +// cmps guards the txn from updates to read set +func (rs readSet) cmps() []v3.Cmp { + cmps := make([]v3.Cmp, 0, len(rs)) + for k, rk := range rs { + cmps = append(cmps, isKeyCurrent(k, rk)) + } + return cmps +} + +type writeSet map[string]stmPut + +func (ws writeSet) get(keys ...string) *stmPut { + for _, key := range keys { + if wv, ok := ws[key]; ok { + return &wv + } + } + return nil +} + +// cmps returns a cmp list testing no writes have happened past rev +func (ws writeSet) cmps(rev int64) []v3.Cmp { + cmps := make([]v3.Cmp, 0, len(ws)) + for key := range ws { + cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev)) + } + return cmps +} + +// puts is the list of ops for all pending writes +func (ws writeSet) puts() []v3.Op { + puts := make([]v3.Op, 0, len(ws)) + for _, v := range ws { + puts = append(puts, v.op) + } + return puts +} + +func (s *stm) Get(keys ...string) string { + if wv := s.wset.get(keys...); wv != nil { + return wv.val + } + return respToValue(s.fetch(keys...)) +} + +func (s *stm) Put(key, val string, opts ...v3.OpOption) { + s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)} +} + +func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} } + +func (s *stm) Rev(key string) int64 { + if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 { + return resp.Kvs[0].ModRevision + } + return 0 +} + +func (s *stm) commit() *v3.TxnResponse { + txnresp, err := 
s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit() + if err != nil { + panic(stmError{err}) + } + if txnresp.Succeeded { + return txnresp + } + return nil +} + +func (s *stm) fetch(keys ...string) *v3.GetResponse { + if len(keys) == 0 { + return nil + } + ops := make([]v3.Op, len(keys)) + for i, key := range keys { + if resp, ok := s.rset[key]; ok { + return resp + } + ops[i] = v3.OpGet(key, s.getOpts...) + } + txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit() + if err != nil { + panic(stmError{err}) + } + s.rset.add(keys, txnresp) + return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange()) +} + +func (s *stm) reset() { + s.rset = make(map[string]*v3.GetResponse) + s.wset = make(map[string]stmPut) +} + +type stmSerializable struct { + stm + prefetch map[string]*v3.GetResponse +} + +func (s *stmSerializable) Get(keys ...string) string { + if wv := s.wset.get(keys...); wv != nil { + return wv.val + } + firstRead := len(s.rset) == 0 + for _, key := range keys { + if resp, ok := s.prefetch[key]; ok { + delete(s.prefetch, key) + s.rset[key] = resp + } + } + resp := s.stm.fetch(keys...) + if firstRead { + // txn's base revision is defined by the first read + s.getOpts = []v3.OpOption{ + v3.WithRev(resp.Header.Revision), + v3.WithSerializable(), + } + } + return respToValue(resp) +} + +func (s *stmSerializable) Rev(key string) int64 { + s.Get(key) + return s.stm.Rev(key) +} + +func (s *stmSerializable) gets() ([]string, []v3.Op) { + keys := make([]string, 0, len(s.rset)) + ops := make([]v3.Op, 0, len(s.rset)) + for k := range s.rset { + keys = append(keys, k) + ops = append(ops, v3.OpGet(k)) + } + return keys, ops +} + +func (s *stmSerializable) commit() *v3.TxnResponse { + keys, getops := s.gets() + txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...) + // use Else to prefetch keys in case of conflict to save a round trip + txnresp, err := txn.Else(getops...).Commit() + if err != nil { + panic(stmError{err}) + } + if txnresp.Succeeded { + return txnresp + } + // load prefetch with Else data + s.rset.add(keys, txnresp) + s.prefetch = s.rset + s.getOpts = nil + return nil +} + +func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp { + if len(r.Kvs) != 0 { + return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision) + } + return v3.Compare(v3.ModRevision(k), "=", 0) +} + +func respToValue(resp *v3.GetResponse) string { + if resp == nil || len(resp.Kvs) == 0 { + return "" + } + return string(resp.Kvs[0].Value) +} + +// NewSTMRepeatable is deprecated. +func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads)) +} + +// NewSTMSerializable is deprecated. +func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable)) +} + +// NewSTMReadCommitted is deprecated. 
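+// Prefer composing NewSTM with WithIsolation instead, for example (an
+// editor's sketch, assuming an existing *v3.Client named cli):
+//
+// _, err := concurrency.NewSTM(cli, func(s concurrency.STM) error {
+// s.Put("dst", s.Get("src"))
+// return nil
+// }, concurrency.WithIsolation(concurrency.ReadCommitted))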
+func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted)) +} diff --git a/vendor/go.etcd.io/etcd/client/v3/config.go b/vendor/go.etcd.io/etcd/client/v3/config.go new file mode 100644 index 0000000000..335a288732 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/config.go @@ -0,0 +1,92 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "crypto/tls" + "time" + + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type Config struct { + // Endpoints is a list of URLs. + Endpoints []string `json:"endpoints"` + + // AutoSyncInterval is the interval to update endpoints with its latest members. + // 0 disables auto-sync. By default auto-sync is disabled. + AutoSyncInterval time.Duration `json:"auto-sync-interval"` + + // DialTimeout is the timeout for failing to establish a connection. + DialTimeout time.Duration `json:"dial-timeout"` + + // DialKeepAliveTime is the time after which client pings the server to see if + // transport is alive. + DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` + + // DialKeepAliveTimeout is the time that the client waits for a response for the + // keep-alive probe. If the response is not received in this time, the connection is closed. + DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` + + // MaxCallSendMsgSize is the client-side request send limit in bytes. + // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024). + // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallSendMsgSize int + + // MaxCallRecvMsgSize is the client-side response receive limit. + // If 0, it defaults to "math.MaxInt32", because range response can + // easily exceed request send limits. + // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallRecvMsgSize int + + // TLS holds the client secure credentials, if any. + TLS *tls.Config + + // Username is a user name for authentication. + Username string `json:"username"` + + // Password is a password for authentication. + Password string `json:"password"` + + // RejectOldCluster when set will refuse to create a client against an outdated cluster. + RejectOldCluster bool `json:"reject-old-cluster"` + + // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). + // For example, pass "grpc.WithBlock()" to block until the underlying connection is up. + // Without this, Dial returns immediately and connecting the server happens in background. + DialOptions []grpc.DialOption + + // Context is the default client context; it can be used to cancel grpc dial out and + // other operations that do not have an explicit context. 
+ Context context.Context + + // Logger sets client-side logger. + // If nil, fallback to building LogConfig. + Logger *zap.Logger + + // LogConfig configures client-side logger. + // If nil, use the default logger. + // TODO: configure gRPC logger + LogConfig *zap.Config + + // PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs). + PermitWithoutStream bool `json:"permit-without-stream"` + + // TODO: support custom balancer picker +} diff --git a/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go b/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go new file mode 100644 index 0000000000..42f688eb35 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go @@ -0,0 +1,131 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package credentials implements gRPC credential interface with etcd specific logic. +// e.g., client handshake with custom authority parameter +package credentials + +import ( + "context" + "crypto/tls" + "net" + "sync" + + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + grpccredentials "google.golang.org/grpc/credentials" +) + +// Config defines gRPC credential configuration. +type Config struct { + TLSConfig *tls.Config +} + +// Bundle defines gRPC credential interface. +type Bundle interface { + grpccredentials.Bundle + UpdateAuthToken(token string) +} + +// NewBundle constructs a new gRPC credential bundle. +func NewBundle(cfg Config) Bundle { + return &bundle{ + tc: newTransportCredential(cfg.TLSConfig), + rc: newPerRPCCredential(), + } +} + +// bundle implements "grpccredentials.Bundle" interface. +type bundle struct { + tc *transportCredential + rc *perRPCCredential +} + +func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials { + return b.tc +} + +func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials { + return b.rc +} + +func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) { + // no-op + return nil, nil +} + +// transportCredential implements "grpccredentials.TransportCredentials" interface. 
+type transportCredential struct { + gtc grpccredentials.TransportCredentials +} + +func newTransportCredential(cfg *tls.Config) *transportCredential { + return &transportCredential{ + gtc: grpccredentials.NewTLS(cfg), + } +} + +func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) { + return tc.gtc.ClientHandshake(ctx, authority, rawConn) +} + +func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) { + return tc.gtc.ServerHandshake(rawConn) +} + +func (tc *transportCredential) Info() grpccredentials.ProtocolInfo { + return tc.gtc.Info() +} + +func (tc *transportCredential) Clone() grpccredentials.TransportCredentials { + return &transportCredential{ + gtc: tc.gtc.Clone(), + } +} + +func (tc *transportCredential) OverrideServerName(serverNameOverride string) error { + return tc.gtc.OverrideServerName(serverNameOverride) +} + +// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface. +type perRPCCredential struct { + authToken string + authTokenMu sync.RWMutex +} + +func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} } + +func (rc *perRPCCredential) RequireTransportSecurity() bool { return false } + +func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) { + rc.authTokenMu.RLock() + authToken := rc.authToken + rc.authTokenMu.RUnlock() + if authToken == "" { + return nil, nil + } + return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil +} + +func (b *bundle) UpdateAuthToken(token string) { + if b.rc == nil { + return + } + b.rc.UpdateAuthToken(token) +} + +func (rc *perRPCCredential) UpdateAuthToken(token string) { + rc.authTokenMu.Lock() + rc.authToken = token + rc.authTokenMu.Unlock() +} diff --git a/vendor/go.etcd.io/etcd/client/v3/ctx.go b/vendor/go.etcd.io/etcd/client/v3/ctx.go new file mode 100644 index 0000000000..56b69cf2ed --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/ctx.go @@ -0,0 +1,50 @@ +// Copyright 2020 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "go.etcd.io/etcd/api/v3/version" + "google.golang.org/grpc/metadata" +) + +// WithRequireLeader requires client requests to only succeed +// when the cluster has a leader. 
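+//
+// For example (an editor's illustration, not part of the upstream file):
+//
+// ctx := clientv3.WithRequireLeader(context.Background())
+// resp, err := cli.Get(ctx, "key")
+// // fails fast with "etcdserver: no leader" while the cluster is leaderless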
+func WithRequireLeader(ctx context.Context) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { // no outgoing metadata ctx key, create one + md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) + return metadata.NewOutgoingContext(ctx, md) + } + copied := md.Copy() // avoid racey updates + // overwrite/add 'hasleader' key/value + copied.Set(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) + return metadata.NewOutgoingContext(ctx, copied) +} + +// embeds client version +func withVersion(ctx context.Context) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { // no outgoing metadata ctx key, create one + md = metadata.Pairs(rpctypes.MetadataClientAPIVersionKey, version.APIVersion) + return metadata.NewOutgoingContext(ctx, md) + } + copied := md.Copy() // avoid racey updates + // overwrite/add version key/value + copied.Set(rpctypes.MetadataClientAPIVersionKey, version.APIVersion) + return metadata.NewOutgoingContext(ctx, copied) +} diff --git a/vendor/go.etcd.io/etcd/client/v3/doc.go b/vendor/go.etcd.io/etcd/client/v3/doc.go new file mode 100644 index 0000000000..fd61aff117 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/doc.go @@ -0,0 +1,106 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package clientv3 implements the official Go etcd client for v3. +// +// Create client using `clientv3.New`: +// +// // expect dial time-out on ipv4 blackhole +// _, err := clientv3.New(clientv3.Config{ +// Endpoints: []string{"http://254.0.0.1:12345"}, +// DialTimeout: 2 * time.Second, +// }) +// +// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3 +// if err == context.DeadlineExceeded { +// // handle errors +// } +// +// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1 +// if err == grpc.ErrClientConnTimeout { +// // handle errors +// } +// +// cli, err := clientv3.New(clientv3.Config{ +// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, +// DialTimeout: 5 * time.Second, +// }) +// if err != nil { +// // handle error! +// } +// defer cli.Close() +// +// Make sure to close the client after using it. If the client is not closed, the +// connection will have leaky goroutines. +// +// To specify a client request timeout, wrap the context with context.WithTimeout: +// +// ctx, cancel := context.WithTimeout(context.Background(), timeout) +// resp, err := kvc.Put(ctx, "sample_key", "sample_value") +// cancel() +// if err != nil { +// // handle error! +// } +// // use the response +// +// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed. +// Clients are safe for concurrent use by multiple goroutines. +// +// etcd client returns 2 types of errors: +// +// 1. context error: canceled or deadline exceeded. +// 2. gRPC error: e.g. when clock drifts in server-side before client's context deadline exceeded. 
+//
+// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go
+//
+// Here is example code for handling client errors:
+//
+// resp, err := kvc.Put(ctx, "", "")
+// if err != nil {
+// if err == context.Canceled {
+// // ctx is canceled by another routine
+// } else if err == context.DeadlineExceeded {
+// // ctx is attached with a deadline and it exceeded
+// } else if err == rpctypes.ErrEmptyKey {
+// // client-side error: key is not provided
+// } else if ev, ok := status.FromError(err); ok {
+// code := ev.Code()
+// if code == codes.DeadlineExceeded {
+// // server-side context might have timed-out first (due to clock skew)
+// // while original client-side context is not timed-out yet
+// }
+// } else {
+// // bad cluster endpoints, which are not etcd servers
+// }
+// }
+//
+// go func() { cli.Close() }()
+// _, err := kvc.Get(ctx, "a")
+// if err != nil {
+// // with etcd clientv3 <= v3.3
+// if err == context.Canceled {
+// // grpc balancer calls 'Get' with an inflight client.Close
+// } else if err == grpc.ErrClientConnClosing { // <= gRPC v1.7.x
+// // grpc balancer calls 'Get' after client.Close.
+// }
+// // with etcd clientv3 >= v3.4
+// if clientv3.IsConnCanceled(err) {
+// // gRPC client connection is closed
+// }
+// }
+//
+// The grpc load balancer is registered statically and is shared across etcd clients.
+// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
+// variable. E.g. "ETCD_CLIENT_DEBUG=1".
+package clientv3
diff --git a/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
new file mode 100644
index 0000000000..35a3fe8c33
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
@@ -0,0 +1,134 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package endpoint
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "path"
+ "strings"
+)
+
+type CredsRequirement int
+
+const (
+ // CREDS_REQUIRE - Credentials/certificate required for this type of connection.
+ CREDS_REQUIRE CredsRequirement = iota
+ // CREDS_DROP - Credentials/certificate not needed and should get ignored.
+ CREDS_DROP
+ // CREDS_OPTIONAL - Credentials/certificate might be used if supplied.
+ CREDS_OPTIONAL
+)
+
+func extractHostFromHostPort(ep string) string {
+ host, _, err := net.SplitHostPort(ep)
+ if err != nil {
+ return ep
+ }
+ return host
+}
+
+// mustSplit2 returns the values from strings.SplitN(s, sep, 2).
+// If sep is not found, it panics.
+func mustSplit2(s, sep string) (string, string) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + panic(fmt.Errorf("token '%v' expected to have separator sep: `%v`", s, sep)) + } + return spl[0], spl[1] +} + +func schemeToCredsRequirement(schema string) CredsRequirement { + switch schema { + case "https", "unixs": + return CREDS_REQUIRE + case "http": + return CREDS_DROP + case "unix": + // Preserving previous behavior from: + // https://github.com/etcd-io/etcd/blob/dae29bb719dd69dc119146fc297a0628fcc1ccf8/client/v3/client.go#L212 + // that likely was a bug due to missing 'fallthrough'. + // At the same time it seems legit to let the users decide whether they + // want credential control or not (and 'unixs' schema is not a standard thing). + return CREDS_OPTIONAL + case "": + return CREDS_OPTIONAL + default: + return CREDS_OPTIONAL + } +} + +// This function translates endpoints names supported by etcd server into +// endpoints as supported by grpc with additional information +// (server_name for cert validation, requireCreds - whether certs are needed). +// The main differences: +// - etcd supports unixs & https names as opposed to unix & http to +// distinguish need to configure certificates. +// - etcd support http(s) names as opposed to tcp supported by grpc/dial method. +// - etcd supports unix(s)://local-file naming schema +// (as opposed to unix:local-file canonical name used by grpc for current dir files). +// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon) +// is considered serverName - to allow local testing of cert-protected communication. +// +// See more: +// - https://github.com/grpc/grpc-go/blob/26c143bd5f59344a4b8a1e491e0f5e18aa97abc7/internal/grpcutil/target.go#L47 +// - https://golang.org/pkg/net/#Dial +// - https://github.com/grpc/grpc/blob/master/doc/naming.md +func translateEndpoint(ep string) (addr string, serverName string, requireCreds CredsRequirement) { + if strings.HasPrefix(ep, "unix:") || strings.HasPrefix(ep, "unixs:") { + if strings.HasPrefix(ep, "unix:///") || strings.HasPrefix(ep, "unixs:///") { + // absolute path case + schema, absolutePath := mustSplit2(ep, "://") + return "unix://" + absolutePath, path.Base(absolutePath), schemeToCredsRequirement(schema) + } + if strings.HasPrefix(ep, "unix://") || strings.HasPrefix(ep, "unixs://") { + // legacy etcd local path + schema, localPath := mustSplit2(ep, "://") + return "unix:" + localPath, path.Base(localPath), schemeToCredsRequirement(schema) + } + schema, localPath := mustSplit2(ep, ":") + return "unix:" + localPath, path.Base(localPath), schemeToCredsRequirement(schema) + } + + if strings.Contains(ep, "://") { + url, err := url.Parse(ep) + if err != nil { + return ep, ep, CREDS_OPTIONAL + } + if url.Scheme == "http" || url.Scheme == "https" { + return url.Host, url.Host, schemeToCredsRequirement(url.Scheme) + } + return ep, url.Host, schemeToCredsRequirement(url.Scheme) + } + // Handles plain addresses like 10.0.0.44:437. + return ep, ep, CREDS_OPTIONAL +} + +// RequiresCredentials returns whether given endpoint requires +// credentials/certificates for connection. +func RequiresCredentials(ep string) CredsRequirement { + _, _, requireCreds := translateEndpoint(ep) + return requireCreds +} + +// Interpret endpoint parses an endpoint of the form +// (http|https)://*|(unix|unixs)://) +// and returns low-level address (supported by 'net') to connect to, +// and a server name used for x509 certificate matching. 
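+//
+// For example (an editor's illustration, following translateEndpoint above):
+//
+// Interpret("https://example.com:2379") // ("example.com:2379", "example.com:2379")
+// Interpret("unix:///tmp/etcd.sock") // ("unix:///tmp/etcd.sock", "etcd.sock")
+// Interpret("10.0.0.44:2379") // ("10.0.0.44:2379", "10.0.0.44:2379")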
+func Interpret(ep string) (address string, serverName string) { + addr, serverName, _ := translateEndpoint(ep) + return addr, serverName +} diff --git a/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go b/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go new file mode 100644 index 0000000000..3ee3cb8e2b --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go @@ -0,0 +1,74 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resolver + +import ( + "go.etcd.io/etcd/client/v3/internal/endpoint" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" +) + +const ( + Schema = "etcd-endpoints" +) + +// EtcdManualResolver is a Resolver (and resolver.Builder) that can be updated +// using SetEndpoints. +type EtcdManualResolver struct { + *manual.Resolver + endpoints []string + serviceConfig *serviceconfig.ParseResult +} + +func New(endpoints ...string) *EtcdManualResolver { + r := manual.NewBuilderWithScheme(Schema) + return &EtcdManualResolver{Resolver: r, endpoints: endpoints, serviceConfig: nil} +} + +// Build returns itself for Resolver, because it's both a builder and a resolver. +func (r *EtcdManualResolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.serviceConfig = cc.ParseServiceConfig(`{"loadBalancingPolicy": "round_robin"}`) + if r.serviceConfig.Err != nil { + return nil, r.serviceConfig.Err + } + res, err := r.Resolver.Build(target, cc, opts) + if err != nil { + return nil, err + } + // Populates endpoints stored in r into ClientConn (cc). + r.updateState() + return res, nil +} + +func (r *EtcdManualResolver) SetEndpoints(endpoints []string) { + r.endpoints = endpoints + r.updateState() +} + +func (r EtcdManualResolver) updateState() { + if r.CC != nil { + addresses := make([]resolver.Address, len(r.endpoints)) + for i, ep := range r.endpoints { + addr, serverName := endpoint.Interpret(ep) + addresses[i] = resolver.Address{Addr: addr, ServerName: serverName} + } + state := resolver.State{ + Addresses: addresses, + ServiceConfig: r.serviceConfig, + } + r.UpdateState(state) + } +} diff --git a/vendor/go.etcd.io/etcd/client/v3/kv.go b/vendor/go.etcd.io/etcd/client/v3/kv.go new file mode 100644 index 0000000000..5e9fb7d458 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/kv.go @@ -0,0 +1,177 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + + "google.golang.org/grpc" +) + +type ( + CompactResponse pb.CompactionResponse + PutResponse pb.PutResponse + GetResponse pb.RangeResponse + DeleteResponse pb.DeleteRangeResponse + TxnResponse pb.TxnResponse +) + +type KV interface { + // Put puts a key-value pair into etcd. + // Note that key,value can be plain bytes array and string is + // an immutable representation of that bytes array. + // To get a string of bytes, do string([]byte{0x10, 0x20}). + Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) + + // Get retrieves keys. + // By default, Get will return the value for "key", if any. + // When passed WithRange(end), Get will return the keys in the range [key, end). + // When passed WithFromKey(), Get returns keys greater than or equal to key. + // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision; + // if the required revision is compacted, the request will fail with ErrCompacted . + // When passed WithLimit(limit), the number of returned keys is bounded by limit. + // When passed WithSort(), the keys will be sorted. + Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) + + // Delete deletes a key, or optionally using WithRange(end), [key, end). + Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) + + // Compact compacts etcd KV history before the given rev. + Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) + + // Do applies a single Op on KV without a transaction. + // Do is useful when creating arbitrary operations to be issued at a + // later time; the user can range over the operations, calling Do to + // execute them. Get/Put/Delete, on the other hand, are best suited + // for when the operation should be issued at the time of declaration. + Do(ctx context.Context, op Op) (OpResponse, error) + + // Txn creates a transaction. 
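+ // For example (an editor's illustration, assuming kv is a KV and ctx a
+ // context.Context):
+ //
+ // resp, err := kv.Txn(ctx).
+ // If(Compare(Value("k"), "=", "v")).
+ // Then(OpPut("k", "v2")).
+ // Else(OpGet("k")).
+ // Commit()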
+ Txn(ctx context.Context) Txn +} + +type OpResponse struct { + put *PutResponse + get *GetResponse + del *DeleteResponse + txn *TxnResponse +} + +func (op OpResponse) Put() *PutResponse { return op.put } +func (op OpResponse) Get() *GetResponse { return op.get } +func (op OpResponse) Del() *DeleteResponse { return op.del } +func (op OpResponse) Txn() *TxnResponse { return op.txn } + +func (resp *PutResponse) OpResponse() OpResponse { + return OpResponse{put: resp} +} +func (resp *GetResponse) OpResponse() OpResponse { + return OpResponse{get: resp} +} +func (resp *DeleteResponse) OpResponse() OpResponse { + return OpResponse{del: resp} +} +func (resp *TxnResponse) OpResponse() OpResponse { + return OpResponse{txn: resp} +} + +type kv struct { + remote pb.KVClient + callOpts []grpc.CallOption +} + +func NewKV(c *Client) KV { + api := &kv{remote: RetryKVClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { + api := &kv{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { + r, err := kv.Do(ctx, OpPut(key, val, opts...)) + return r.put, toErr(ctx, err) +} + +func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { + r, err := kv.Do(ctx, OpGet(key, opts...)) + return r.get, toErr(ctx, err) +} + +func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { + r, err := kv.Do(ctx, OpDelete(key, opts...)) + return r.del, toErr(ctx, err) +} + +func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { + resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*CompactResponse)(resp), err +} + +func (kv *kv) Txn(ctx context.Context) Txn { + return &txn{ + kv: kv, + ctx: ctx, + callOpts: kv.callOpts, + } +} + +func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { + var err error + switch op.t { + case tRange: + var resp *pb.RangeResponse + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) + if err == nil { + return OpResponse{get: (*GetResponse)(resp)}, nil + } + case tPut: + var resp *pb.PutResponse + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + resp, err = kv.remote.Put(ctx, r, kv.callOpts...) + if err == nil { + return OpResponse{put: (*PutResponse)(resp)}, nil + } + case tDeleteRange: + var resp *pb.DeleteRangeResponse + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} + resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...) + if err == nil { + return OpResponse{del: (*DeleteResponse)(resp)}, nil + } + case tTxn: + var resp *pb.TxnResponse + resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...) 
+ if err == nil { + return OpResponse{txn: (*TxnResponse)(resp)}, nil + } + default: + panic("Unknown op") + } + return OpResponse{}, toErr(ctx, err) +} diff --git a/vendor/go.etcd.io/etcd/client/v3/lease.go b/vendor/go.etcd.io/etcd/client/v3/lease.go new file mode 100644 index 0000000000..19af9c093a --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/lease.go @@ -0,0 +1,607 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "sync" + "time" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +type ( + LeaseRevokeResponse pb.LeaseRevokeResponse + LeaseID int64 +) + +// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. +type LeaseGrantResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 + Error string +} + +// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. +type LeaseKeepAliveResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 +} + +// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. +type LeaseTimeToLiveResponse struct { + *pb.ResponseHeader + ID LeaseID `json:"id"` + + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1. + TTL int64 `json:"ttl"` + + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `json:"granted-ttl"` + + // Keys is the list of keys attached to this lease. + Keys [][]byte `json:"keys"` +} + +// LeaseStatus represents a lease status. +type LeaseStatus struct { + ID LeaseID `json:"id"` + // TODO: TTL int64 +} + +// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse. +type LeaseLeasesResponse struct { + *pb.ResponseHeader + Leases []LeaseStatus `json:"leases"` +} + +const ( + // defaultTTL is the assumed lease TTL used for the first keepalive + // deadline before the actual TTL is known to the client. + defaultTTL = 5 * time.Second + // NoLease is a lease ID for the absence of a lease. + NoLease LeaseID = 0 + + // retryConnWait is how long to wait before retrying request due to an error + retryConnWait = 500 * time.Millisecond +) + +// LeaseResponseChSize is the size of buffer to store unsent lease responses. +// WARNING: DO NOT UPDATE. +// Only for testing purposes. +var LeaseResponseChSize = 16 + +// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. +// +// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected. +type ErrKeepAliveHalted struct { + Reason error +} + +func (e ErrKeepAliveHalted) Error() string { + s := "etcdclient: leases keep alive halted" + if e.Reason != nil { + s += ": " + e.Reason.Error() + } + return s +} + +type Lease interface { + // Grant creates a new lease. 
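+ // For example (an editor's illustration): grant a 10-second lease and
+ // attach a key to it, so the key is removed when the lease expires:
+ //
+ // lresp, err := cli.Grant(ctx, 10)
+ // if err == nil {
+ // _, err = cli.Put(ctx, "k", "v", clientv3.WithLease(lresp.ID))
+ // }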
+ Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) + + // Revoke revokes the given lease. + Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) + + // TimeToLive retrieves the lease information of the given lease ID. + TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) + + // Leases retrieves all leases. + Leases(ctx context.Context) (*LeaseLeasesResponse, error) + + // KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted + // to the channel are not consumed promptly the channel may become full. When full, the lease + // client will continue sending keep alive requests to the etcd server, but will drop responses + // until there is capacity on the channel to send more responses. + // + // If client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or + // canceled by the caller (e.g. context.Canceled), KeepAlive returns a ErrKeepAliveHalted error + // containing the error reason. + // + // The returned "LeaseKeepAliveResponse" channel closes if underlying keep + // alive stream is interrupted in some way the client cannot handle itself; + // given context "ctx" is canceled or timed out. + // + // TODO(v4.0): post errors to last keep alive message before closing + // (see https://github.com/etcd-io/etcd/pull/7866) + KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) + + // KeepAliveOnce renews the lease once. The response corresponds to the + // first message from calling KeepAlive. If the response has a recoverable + // error, KeepAliveOnce will retry the RPC with a new keep alive message. + // + // In most of the cases, Keepalive should be used instead of KeepAliveOnce. + KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) + + // Close releases all resources Lease keeps for efficient communication + // with the etcd server. + Close() error +} + +type lessor struct { + mu sync.Mutex // guards all fields + + // donec is closed and loopErr is set when recvKeepAliveLoop stops + donec chan struct{} + loopErr error + + remote pb.LeaseClient + + stream pb.Lease_LeaseKeepAliveClient + streamCancel context.CancelFunc + + stopCtx context.Context + stopCancel context.CancelFunc + + keepAlives map[LeaseID]*keepAlive + + // firstKeepAliveTimeout is the timeout for the first keepalive request + // before the actual TTL is known to the lease client + firstKeepAliveTimeout time.Duration + + // firstKeepAliveOnce ensures stream starts after first KeepAlive call. + firstKeepAliveOnce sync.Once + + callOpts []grpc.CallOption + + lg *zap.Logger +} + +// keepAlive multiplexes a keepalive for a lease over multiple channels +type keepAlive struct { + chs []chan<- *LeaseKeepAliveResponse + ctxs []context.Context + // deadline is the time the keep alive channels close if no response + deadline time.Time + // nextKeepAlive is when to send the next keep alive message + nextKeepAlive time.Time + // donec is closed on lease revoke, expiration, or cancel. 
+ donec chan struct{} +} + +func NewLease(c *Client) Lease { + return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second) +} + +func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease { + l := &lessor{ + donec: make(chan struct{}), + keepAlives: make(map[LeaseID]*keepAlive), + remote: remote, + firstKeepAliveTimeout: keepAliveTimeout, + lg: c.lg, + } + if l.firstKeepAliveTimeout == time.Second { + l.firstKeepAliveTimeout = defaultTTL + } + if c != nil { + l.callOpts = c.callOpts + } + reqLeaderCtx := WithRequireLeader(context.Background()) + l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) + return l +} + +func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { + r := &pb.LeaseGrantRequest{TTL: ttl} + resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...) + if err == nil { + gresp := &LeaseGrantResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + Error: resp.Error, + } + return gresp, nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { + r := &pb.LeaseRevokeRequest{ID: int64(id)} + resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...) + if err == nil { + return (*LeaseRevokeResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { + r := toLeaseTimeToLiveRequest(id, opts...) + resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + gresp := &LeaseTimeToLiveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + GrantedTTL: resp.GrantedTTL, + Keys: resp.Keys, + } + return gresp, nil +} + +func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) { + resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...) 
+ if err == nil { + leases := make([]LeaseStatus, len(resp.Leases)) + for i := range resp.Leases { + leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)} + } + return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { + ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize) + + l.mu.Lock() + // ensure that recvKeepAliveLoop is still running + select { + case <-l.donec: + err := l.loopErr + l.mu.Unlock() + close(ch) + return ch, ErrKeepAliveHalted{Reason: err} + default: + } + ka, ok := l.keepAlives[id] + if !ok { + // create fresh keep alive + ka = &keepAlive{ + chs: []chan<- *LeaseKeepAliveResponse{ch}, + ctxs: []context.Context{ctx}, + deadline: time.Now().Add(l.firstKeepAliveTimeout), + nextKeepAlive: time.Now(), + donec: make(chan struct{}), + } + l.keepAlives[id] = ka + } else { + // add channel and context to existing keep alive + ka.ctxs = append(ka.ctxs, ctx) + ka.chs = append(ka.chs, ch) + } + l.mu.Unlock() + + if ctx.Done() != nil { + go l.keepAliveCtxCloser(ctx, id, ka.donec) + } + l.firstKeepAliveOnce.Do(func() { + go l.recvKeepAliveLoop() + go l.deadlineLoop() + }) + + return ch, nil +} + +func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { + for { + resp, err := l.keepAliveOnce(ctx, id) + if err == nil { + if resp.TTL <= 0 { + err = rpctypes.ErrLeaseNotFound + } + return resp, err + } + if isHaltErr(ctx, err) { + return nil, toErr(ctx, err) + } + } +} + +func (l *lessor) Close() error { + l.stopCancel() + // close for synchronous teardown if stream goroutines never launched + l.firstKeepAliveOnce.Do(func() { close(l.donec) }) + <-l.donec + return nil +} + +func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) { + select { + case <-donec: + return + case <-l.donec: + return + case <-ctx.Done(): + } + + l.mu.Lock() + defer l.mu.Unlock() + + ka, ok := l.keepAlives[id] + if !ok { + return + } + + // close channel and remove context if still associated with keep alive + for i, c := range ka.ctxs { + if c == ctx { + close(ka.chs[i]) + ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...) + ka.chs = append(ka.chs[:i], ka.chs[i+1:]...) + break + } + } + // remove if no one more listeners + if len(ka.chs) == 0 { + delete(l.keepAlives, id) + } +} + +// closeRequireLeader scans keepAlives for ctxs that have require leader +// and closes the associated channels. 
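+// It runs when the keep alive stream fails with "etcdserver: no leader", so
+// callers that opted in via WithRequireLeader see their channels close rather
+// than block until a leader returns.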
+func (l *lessor) closeRequireLeader() { + l.mu.Lock() + defer l.mu.Unlock() + for _, ka := range l.keepAlives { + reqIdxs := 0 + // find all required leader channels, close, mark as nil + for i, ctx := range ka.ctxs { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + continue + } + ks := md[rpctypes.MetadataRequireLeaderKey] + if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader { + continue + } + close(ka.chs[i]) + ka.chs[i] = nil + reqIdxs++ + } + if reqIdxs == 0 { + continue + } + // remove all channels that required a leader from keepalive + newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs) + newCtxs := make([]context.Context, len(newChs)) + newIdx := 0 + for i := range ka.chs { + if ka.chs[i] == nil { + continue + } + newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx] + newIdx++ + } + ka.chs, ka.ctxs = newChs, newCtxs + } +} + +func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKeepAliveResponse, ferr error) { + cctx, cancel := context.WithCancel(ctx) + defer cancel() + + stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + + defer func() { + if err := stream.CloseSend(); err != nil { + if ferr == nil { + ferr = toErr(ctx, err) + } + return + } + }() + + err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) + if err != nil { + return nil, toErr(ctx, err) + } + + resp, rerr := stream.Recv() + if rerr != nil { + return nil, toErr(ctx, rerr) + } + + karesp = &LeaseKeepAliveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + } + return karesp, nil +} + +func (l *lessor) recvKeepAliveLoop() (gerr error) { + defer func() { + l.mu.Lock() + close(l.donec) + l.loopErr = gerr + for _, ka := range l.keepAlives { + ka.close() + } + l.keepAlives = make(map[LeaseID]*keepAlive) + l.mu.Unlock() + }() + + for { + stream, err := l.resetRecv() + if err != nil { + if canceledByCaller(l.stopCtx, err) { + return err + } + } else { + for { + resp, err := stream.Recv() + if err != nil { + if canceledByCaller(l.stopCtx, err) { + return err + } + + if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { + l.closeRequireLeader() + } + break + } + + l.recvKeepAlive(resp) + } + } + + select { + case <-time.After(retryConnWait): + case <-l.stopCtx.Done(): + return l.stopCtx.Err() + } + } +} + +// resetRecv opens a new lease stream and starts sending keep alive requests. +func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { + sctx, cancel := context.WithCancel(l.stopCtx) + stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...) 
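+ // withMax(0) presumably turns off automatic retries for this stream (an
+ // editor's reading, not upstream documentation); recvKeepAliveLoop handles
+ // reconnects itself, waiting retryConnWait between attempts.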
+	if err != nil {
+		cancel()
+		return nil, err
+	}
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if l.stream != nil && l.streamCancel != nil {
+		l.streamCancel()
+	}
+
+	l.streamCancel = cancel
+	l.stream = stream
+
+	go l.sendKeepAliveLoop(stream)
+	return stream, nil
+}
+
+// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse.
+func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
+	karesp := &LeaseKeepAliveResponse{
+		ResponseHeader: resp.GetHeader(),
+		ID:             LeaseID(resp.ID),
+		TTL:            resp.TTL,
+	}
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	ka, ok := l.keepAlives[karesp.ID]
+	if !ok {
+		return
+	}
+
+	if karesp.TTL <= 0 {
+		// lease expired; close all keep alive channels
+		delete(l.keepAlives, karesp.ID)
+		ka.close()
+		return
+	}
+
+	// send update to all channels
+	nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
+	ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
+	for _, ch := range ka.chs {
+		select {
+		case ch <- karesp:
+		default:
+			if l.lg != nil {
+				l.lg.Warn("lease keepalive response queue is full; dropping response send",
+					zap.Int("queue-size", len(ch)),
+					zap.Int("queue-capacity", cap(ch)),
+				)
+			}
+		}
+		// still advance in order to rate-limit keep-alive sends
+		ka.nextKeepAlive = nextKeepAlive
+	}
+}
+
+// deadlineLoop reaps any keep alive channels that have not received a response
+// within the lease TTL.
+func (l *lessor) deadlineLoop() {
+	for {
+		select {
+		case <-time.After(time.Second):
+		case <-l.donec:
+			return
+		}
+		now := time.Now()
+		l.mu.Lock()
+		for id, ka := range l.keepAlives {
+			if ka.deadline.Before(now) {
+				// waited too long for response; lease may be expired
+				ka.close()
+				delete(l.keepAlives, id)
+			}
+		}
+		l.mu.Unlock()
+	}
+}
+
+// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
+func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
+	for {
+		var tosend []LeaseID
+
+		now := time.Now()
+		l.mu.Lock()
+		for id, ka := range l.keepAlives {
+			if ka.nextKeepAlive.Before(now) {
+				tosend = append(tosend, id)
+			}
+		}
+		l.mu.Unlock()
+
+		for _, id := range tosend {
+			r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
+			if err := stream.Send(r); err != nil {
+				// TODO do something with this error?
+				return
+			}
+		}
+
+		select {
+		case <-time.After(retryConnWait):
+		case <-stream.Context().Done():
+			return
+		case <-l.donec:
+			return
+		case <-l.stopCtx.Done():
+			return
+		}
+	}
+}
+
+func (ka *keepAlive) close() {
+	close(ka.donec)
+	for _, ch := range ka.chs {
+		close(ch)
+	}
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/logger.go b/vendor/go.etcd.io/etcd/client/v3/logger.go
new file mode 100644
index 0000000000..eaa35f2d3a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/logger.go
@@ -0,0 +1,59 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
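The logger file that follows only overrides the gRPC logger when ETCD_CLIENT_DEBUG is set. A hedged, self-contained sketch of the level parsing it relies on (zapcore.Level implements flag.Value, which is what etcdClientDebugLevel uses; the sample values are illustrative):

```go
package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	// Mirrors etcdClientDebugLevel below: empty or "true" maps to info,
	// anything else is parsed as a zap level, falling back to info.
	for _, v := range []string{"", "true", "debug", "warn", "bogus"} {
		level := zapcore.InfoLevel
		if v != "" && v != "true" {
			var l zapcore.Level
			if err := l.Set(v); err == nil {
				level = l
			}
		}
		fmt.Printf("ETCD_CLIENT_DEBUG=%q -> %v\n", v, level)
	}
}
```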
+
+package clientv3
+
+import (
+	"log"
+	"os"
+
+	"go.etcd.io/etcd/client/pkg/v3/logutil"
+	"go.uber.org/zap/zapcore"
+	"go.uber.org/zap/zapgrpc"
+	"google.golang.org/grpc/grpclog"
+)
+
+func init() {
+	// We override the grpc logger only when the environment variable is set,
+	// in order not to interfere by default with user's code or other libraries.
+	if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
+		lg, err := logutil.CreateDefaultZapLogger(etcdClientDebugLevel())
+		if err != nil {
+			panic(err)
+		}
+		lg = lg.Named("etcd-client")
+		grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))
+	}
+}
+
+// SetLogger sets the grpc logger.
+//
+// Deprecated: use grpclog.SetLoggerV2 directly or grpc_zap.ReplaceGrpcLoggerV2.
+func SetLogger(l grpclog.LoggerV2) {
+	grpclog.SetLoggerV2(l)
+}
+
+// etcdClientDebugLevel translates ETCD_CLIENT_DEBUG into a zap log level.
+func etcdClientDebugLevel() zapcore.Level {
+	envLevel := os.Getenv("ETCD_CLIENT_DEBUG")
+	if envLevel == "" || envLevel == "true" {
+		return zapcore.InfoLevel
+	}
+	var l zapcore.Level
+	if err := l.Set(envLevel); err != nil {
+		log.Printf("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. Using default level: 'info'")
+		return zapcore.InfoLevel
+	}
+	return l
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/maintenance.go b/vendor/go.etcd.io/etcd/client/v3/maintenance.go
new file mode 100644
index 0000000000..a98b8ca51e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/maintenance.go
@@ -0,0 +1,255 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+)
+
+type (
+	DefragmentResponse pb.DefragmentResponse
+	AlarmResponse      pb.AlarmResponse
+	AlarmMember        pb.AlarmMember
+	StatusResponse     pb.StatusResponse
+	HashKVResponse     pb.HashKVResponse
+	MoveLeaderResponse pb.MoveLeaderResponse
+)
+
+type Maintenance interface {
+	// AlarmList gets all active alarms.
+	AlarmList(ctx context.Context) (*AlarmResponse, error)
+
+	// AlarmDisarm disarms a given alarm.
+	AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
+
+	// Defragment releases wasted space from internal fragmentation on a given etcd member.
+	// Defragment is only needed after deleting a large number of keys, in order to reclaim
+	// the resources.
+	// Defragment is an expensive operation. Users should avoid defragmenting multiple members
+	// at the same time.
+	// To defragment multiple members in the cluster, users need to call Defragment multiple
+	// times with different endpoints.
+	Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
+
+	// Status gets the status of the endpoint.
+	Status(ctx context.Context, endpoint string) (*StatusResponse, error)
+
+	// HashKV returns a hash of the KV state at the time of the RPC.
+	// If revision is zero, the hash is computed on all keys. If the revision
+	// is non-zero, the hash is computed on all keys at or below the given revision.
+	HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
+
+	// Snapshot provides a reader for a point-in-time snapshot of etcd.
+	// If the context "ctx" is canceled or timed out, reading from returned
+	// "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
+	Snapshot(ctx context.Context) (io.ReadCloser, error)
+
+	// MoveLeader requests current leader to transfer its leadership to the transferee.
+	// Request must be made to the leader.
+	MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
+}
+
+type maintenance struct {
+	lg       *zap.Logger
+	dial     func(endpoint string) (pb.MaintenanceClient, func(), error)
+	remote   pb.MaintenanceClient
+	callOpts []grpc.CallOption
+}
+
+func NewMaintenance(c *Client) Maintenance {
+	api := &maintenance{
+		lg: c.lg,
+		dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
+			conn, err := c.Dial(endpoint)
+			if err != nil {
+				return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err)
+			}
+
+			// get token with established connection
+			dctx := c.ctx
+			cancel := func() {}
+			if c.cfg.DialTimeout > 0 {
+				dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+			}
+			err = c.getToken(dctx)
+			cancel()
+			if err != nil {
+				conn.Close()
+				return nil, nil, fmt.Errorf("failed to getToken from endpoint %s with maintenance client: %v", endpoint, err)
+			}
+			cancel = func() { conn.Close() }
+			return RetryMaintenanceClient(c, conn), cancel, nil
+		},
+		remote: RetryMaintenanceClient(c, c.conn),
+	}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
+	api := &maintenance{
+		lg: c.lg,
+		dial: func(string) (pb.MaintenanceClient, func(), error) {
+			return remote, func() {}, nil
+		},
+		remote: remote,
+	}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
+	req := &pb.AlarmRequest{
+		Action:   pb.AlarmRequest_GET,
+		MemberID: 0,                 // all
+		Alarm:    pb.AlarmType_NONE, // all
+	}
+	resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+	if err == nil {
+		return (*AlarmResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
+	req := &pb.AlarmRequest{
+		Action:   pb.AlarmRequest_DEACTIVATE,
+		MemberID: am.MemberID,
+		Alarm:    am.Alarm,
+	}
+
+	if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
+		ar, err := m.AlarmList(ctx)
+		if err != nil {
+			return nil, toErr(ctx, err)
+		}
+		ret := AlarmResponse{}
+		for _, am := range ar.Alarms {
+			dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
+			if derr != nil {
+				return nil, toErr(ctx, derr)
+			}
+			ret.Alarms = append(ret.Alarms, dresp.Alarms...)
+		}
+		return &ret, nil
+	}
+
+	resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+	if err == nil {
+		return (*AlarmResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
+	remote, cancel, err := m.dial(endpoint)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	defer cancel()
+	resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*DefragmentResponse)(resp), nil
+}
+
+func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
+	remote, cancel, err := m.dial(endpoint)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	defer cancel()
+	resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*StatusResponse)(resp), nil
+}
+
+func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
+	remote, cancel, err := m.dial(endpoint)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	defer cancel()
+	resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*HashKVResponse)(resp), nil
+}
+
+func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
+	ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+
+	m.lg.Info("opened snapshot stream; downloading")
+	pr, pw := io.Pipe()
+	go func() {
+		for {
+			resp, err := ss.Recv()
+			if err != nil {
+				switch err {
+				case io.EOF:
+					m.lg.Info("completed snapshot read; closing")
+				default:
+					m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err))
+				}
+				pw.CloseWithError(err)
+				return
+			}
+
+			// Can "resp == nil && err == nil" happen before we receive the snapshot SHA digest?
+			// No: the server sends EOF with an empty response after it sends the SHA digest at the end.
+
+			if _, werr := pw.Write(resp.Blob); werr != nil {
+				pw.CloseWithError(werr)
+				return
+			}
+		}
+	}()
+	return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
+}
+
+type snapshotReadCloser struct {
+	ctx context.Context
+	io.ReadCloser
+}
+
+func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
+	n, err = rc.ReadCloser.Read(p)
+	return n, toErr(rc.ctx, err)
+}
+
+func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
+	resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
+	return (*MoveLeaderResponse)(resp), toErr(ctx, err)
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/op.go b/vendor/go.etcd.io/etcd/client/v3/op.go
new file mode 100644
index 0000000000..5251906322
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/op.go
@@ -0,0 +1,583 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+
+type opType int
+
+const (
+	// A default Op has opType 0, which is invalid.
+	tRange opType = iota + 1
+	tPut
+	tDeleteRange
+	tTxn
+)
+
+var noPrefixEnd = []byte{0}
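As a quick orientation for the op.go file being vendored here, a hedged sketch of the public Op surface it defines; the keys and values are illustrative, not taken from this patch:

```go
package main

import (
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Ops are built without being executed; they can be inspected, or
	// combined into a transaction later via cli.Txn(ctx).Then(...).
	put := clientv3.OpPut("greeting", "hello")
	get := clientv3.OpGet("greeting", clientv3.WithSerializable())
	fmt.Println(put.IsPut(), get.IsGet()) // true true
	fmt.Println(string(get.KeyBytes()))   // greeting
}
```

+
+// Op represents an Operation that kv can execute.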
+type Op struct {
+	t   opType
+	key []byte
+	end []byte
+
+	// for range
+	limit        int64
+	sort         *SortOption
+	serializable bool
+	keysOnly     bool
+	countOnly    bool
+	minModRev    int64
+	maxModRev    int64
+	minCreateRev int64
+	maxCreateRev int64
+
+	// for range, watch
+	rev int64
+
+	// for watch, put, delete
+	prevKV bool
+
+	// for watch
+	// fragmentation should be disabled by default
+	// if true, split watch events when total exceeds
+	// "--max-request-bytes" flag value + 512-byte
+	fragment bool
+
+	// for put
+	ignoreValue bool
+	ignoreLease bool
+
+	// progressNotify is for progress updates.
+	progressNotify bool
+	// createdNotify is for the created event
+	createdNotify bool
+	// filters for watchers
+	filterPut    bool
+	filterDelete bool
+
+	// for put
+	val     []byte
+	leaseID LeaseID
+
+	// txn
+	cmps    []Cmp
+	thenOps []Op
+	elseOps []Op
+
+	isOptsWithFromKey bool
+	isOptsWithPrefix  bool
+}
+
+// accessors / mutators
+
+// IsTxn returns true if the "Op" type is transaction.
+func (op Op) IsTxn() bool {
+	return op.t == tTxn
+}
+
+// Txn returns the comparison (if) operations, "then" operations, and "else" operations.
+func (op Op) Txn() ([]Cmp, []Op, []Op) {
+	return op.cmps, op.thenOps, op.elseOps
+}
+
+// KeyBytes returns the byte slice holding the Op's key.
+func (op Op) KeyBytes() []byte { return op.key }
+
+// WithKeyBytes sets the byte slice for the Op's key.
+func (op *Op) WithKeyBytes(key []byte) { op.key = key }
+
+// RangeBytes returns the byte slice holding the Op's range end, if any.
+func (op Op) RangeBytes() []byte { return op.end }
+
+// Rev returns the requested revision, if any.
+func (op Op) Rev() int64 { return op.rev }
+
+// IsPut returns true iff the operation is a Put.
+func (op Op) IsPut() bool { return op.t == tPut }
+
+// IsGet returns true iff the operation is a Get.
+func (op Op) IsGet() bool { return op.t == tRange }
+
+// IsDelete returns true iff the operation is a Delete.
+func (op Op) IsDelete() bool { return op.t == tDeleteRange }
+
+// IsSerializable returns true if the serializable field is true.
+func (op Op) IsSerializable() bool { return op.serializable }
+
+// IsKeysOnly returns whether keysOnly is set.
+func (op Op) IsKeysOnly() bool { return op.keysOnly }
+
+// IsCountOnly returns whether countOnly is set.
+func (op Op) IsCountOnly() bool { return op.countOnly }
+
+// MinModRev returns the operation's minimum modify revision.
+func (op Op) MinModRev() int64 { return op.minModRev }
+
+// MaxModRev returns the operation's maximum modify revision.
+func (op Op) MaxModRev() int64 { return op.maxModRev }
+
+// MinCreateRev returns the operation's minimum create revision.
+func (op Op) MinCreateRev() int64 { return op.minCreateRev }
+
+// MaxCreateRev returns the operation's maximum create revision.
+func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
+
+// WithRangeBytes sets the byte slice for the Op's range end.
+func (op *Op) WithRangeBytes(end []byte) { op.end = end }
+
+// ValueBytes returns the byte slice holding the Op's value, if any.
+func (op Op) ValueBytes() []byte { return op.val }
+
+// WithValueBytes sets the byte slice for the Op's value.
+func (op *Op) WithValueBytes(v []byte) { op.val = v } + +func (op Op) toRangeRequest() *pb.RangeRequest { + if op.t != tRange { + panic("op.t != tRange") + } + r := &pb.RangeRequest{ + Key: op.key, + RangeEnd: op.end, + Limit: op.limit, + Revision: op.rev, + Serializable: op.serializable, + KeysOnly: op.keysOnly, + CountOnly: op.countOnly, + MinModRevision: op.minModRev, + MaxModRevision: op.maxModRev, + MinCreateRevision: op.minCreateRev, + MaxCreateRevision: op.maxCreateRev, + } + if op.sort != nil { + r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) + r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) + } + return r +} + +func (op Op) toTxnRequest() *pb.TxnRequest { + thenOps := make([]*pb.RequestOp, len(op.thenOps)) + for i, tOp := range op.thenOps { + thenOps[i] = tOp.toRequestOp() + } + elseOps := make([]*pb.RequestOp, len(op.elseOps)) + for i, eOp := range op.elseOps { + elseOps[i] = eOp.toRequestOp() + } + cmps := make([]*pb.Compare, len(op.cmps)) + for i := range op.cmps { + cmps[i] = (*pb.Compare)(&op.cmps[i]) + } + return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} +} + +func (op Op) toRequestOp() *pb.RequestOp { + switch op.t { + case tRange: + return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} + case tPut: + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} + case tDeleteRange: + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} + return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} + case tTxn: + return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}} + default: + panic("Unknown Op") + } +} + +func (op Op) isWrite() bool { + if op.t == tTxn { + for _, tOp := range op.thenOps { + if tOp.isWrite() { + return true + } + } + for _, tOp := range op.elseOps { + if tOp.isWrite() { + return true + } + } + return false + } + return op.t != tRange +} + +func NewOp() *Op { + return &Op{key: []byte("")} +} + +// OpGet returns "get" operation based on given key and operation options. +func OpGet(key string, opts ...OpOption) Op { + // WithPrefix and WithFromKey are not supported together + if IsOptsWithPrefix(opts) && IsOptsWithFromKey(opts) { + panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") + } + ret := Op{t: tRange, key: []byte(key)} + ret.applyOpts(opts) + return ret +} + +// OpDelete returns "delete" operation based on given key and operation options. 
+func OpDelete(key string, opts ...OpOption) Op { + // WithPrefix and WithFromKey are not supported together + if IsOptsWithPrefix(opts) && IsOptsWithFromKey(opts) { + panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") + } + ret := Op{t: tDeleteRange, key: []byte(key)} + ret.applyOpts(opts) + switch { + case ret.leaseID != 0: + panic("unexpected lease in delete") + case ret.limit != 0: + panic("unexpected limit in delete") + case ret.rev != 0: + panic("unexpected revision in delete") + case ret.sort != nil: + panic("unexpected sort in delete") + case ret.serializable: + panic("unexpected serializable in delete") + case ret.countOnly: + panic("unexpected countOnly in delete") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in delete") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in delete") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in delete") + case ret.createdNotify: + panic("unexpected createdNotify in delete") + } + return ret +} + +// OpPut returns "put" operation based on given key-value and operation options. +func OpPut(key, val string, opts ...OpOption) Op { + ret := Op{t: tPut, key: []byte(key), val: []byte(val)} + ret.applyOpts(opts) + switch { + case ret.end != nil: + panic("unexpected range in put") + case ret.limit != 0: + panic("unexpected limit in put") + case ret.rev != 0: + panic("unexpected revision in put") + case ret.sort != nil: + panic("unexpected sort in put") + case ret.serializable: + panic("unexpected serializable in put") + case ret.countOnly: + panic("unexpected countOnly in put") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in put") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in put") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in put") + case ret.createdNotify: + panic("unexpected createdNotify in put") + } + return ret +} + +// OpTxn returns "txn" operation based on given transaction conditions. +func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { + return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} +} + +func opWatch(key string, opts ...OpOption) Op { + ret := Op{t: tRange, key: []byte(key)} + ret.applyOpts(opts) + switch { + case ret.leaseID != 0: + panic("unexpected lease in watch") + case ret.limit != 0: + panic("unexpected limit in watch") + case ret.sort != nil: + panic("unexpected sort in watch") + case ret.serializable: + panic("unexpected serializable in watch") + case ret.countOnly: + panic("unexpected countOnly in watch") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in watch") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in watch") + } + return ret +} + +func (op *Op) applyOpts(opts []OpOption) { + for _, opt := range opts { + opt(op) + } +} + +// OpOption configures Operations like Get, Put, Delete. +type OpOption func(*Op) + +// WithLease attaches a lease ID to a key in 'Put' request. +func WithLease(leaseID LeaseID) OpOption { + return func(op *Op) { op.leaseID = leaseID } +} + +// WithLimit limits the number of results to return from 'Get' request. +// If WithLimit is given a 0 limit, it is treated as no limit. +func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } + +// WithRev specifies the store revision for 'Get' request. 
+// Or the start revision of 'Watch' request. +func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } } + +// WithSort specifies the ordering in 'Get' request. It requires +// 'WithRange' and/or 'WithPrefix' to be specified too. +// 'target' specifies the target to sort by: key, version, revisions, value. +// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'. +func WithSort(target SortTarget, order SortOrder) OpOption { + return func(op *Op) { + if target == SortByKey && order == SortAscend { + // If order != SortNone, server fetches the entire key-space, + // and then applies the sort and limit, if provided. + // Since by default the server returns results sorted by keys + // in lexicographically ascending order, the client should ignore + // SortOrder if the target is SortByKey. + order = SortNone + } + op.sort = &SortOption{target, order} + } +} + +// GetPrefixRangeEnd gets the range end of the prefix. +// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'. +func GetPrefixRangeEnd(prefix string) string { + return string(getPrefix([]byte(prefix))) +} + +func getPrefix(key []byte) []byte { + end := make([]byte, len(key)) + copy(end, key) + for i := len(end) - 1; i >= 0; i-- { + if end[i] < 0xff { + end[i] = end[i] + 1 + end = end[:i+1] + return end + } + } + // next prefix does not exist (e.g., 0xffff); + // default to WithFromKey policy + return noPrefixEnd +} + +// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate +// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())' +// can return 'foo1', 'foo2', and so on. +func WithPrefix() OpOption { + return func(op *Op) { + op.isOptsWithPrefix = true + if len(op.key) == 0 { + op.key, op.end = []byte{0}, []byte{0} + return + } + op.end = getPrefix(op.key) + } +} + +// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests. +// For example, 'Get' requests with 'WithRange(end)' returns +// the keys in the range [key, end). +// endKey must be lexicographically greater than start key. +func WithRange(endKey string) OpOption { + return func(op *Op) { op.end = []byte(endKey) } +} + +// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests +// to be equal or greater than the key in the argument. +func WithFromKey() OpOption { + return func(op *Op) { + if len(op.key) == 0 { + op.key = []byte{0} + } + op.end = []byte("\x00") + op.isOptsWithFromKey = true + } +} + +// WithSerializable makes 'Get' request serializable. By default, +// it's linearizable. Serializable requests are better for lower latency +// requirement. +func WithSerializable() OpOption { + return func(op *Op) { op.serializable = true } +} + +// WithKeysOnly makes the 'Get' request return only the keys and the corresponding +// values will be omitted. +func WithKeysOnly() OpOption { + return func(op *Op) { op.keysOnly = true } +} + +// WithCountOnly makes the 'Get' request return only the count of keys. +func WithCountOnly() OpOption { + return func(op *Op) { op.countOnly = true } +} + +// WithMinModRev filters out keys for Get with modification revisions less than the given revision. +func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } } + +// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision. +func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } } + +// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision. 
+func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
+
+// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
+func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
+
+// WithFirstCreate gets the key with the oldest creation revision in the request range.
+func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
+
+// WithLastCreate gets the key with the latest creation revision in the request range.
+func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
+
+// WithFirstKey gets the lexically first key in the request range.
+func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
+
+// WithLastKey gets the lexically last key in the request range.
+func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
+
+// WithFirstRev gets the key with the oldest modification revision in the request range.
+func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
+
+// WithLastRev gets the key with the latest modification revision in the request range.
+func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
+
+// withTop gets the first key over the get's prefix given a sort order.
+func withTop(target SortTarget, order SortOrder) []OpOption {
+	return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
+}
+
+// WithProgressNotify makes the watch server send periodic progress updates
+// every 10 minutes when there are no incoming events.
+// Progress updates have zero events in WatchResponse.
+func WithProgressNotify() OpOption {
+	return func(op *Op) {
+		op.progressNotify = true
+	}
+}
+
+// WithCreatedNotify makes the watch server send the created event.
+func WithCreatedNotify() OpOption {
+	return func(op *Op) {
+		op.createdNotify = true
+	}
+}
+
+// WithFilterPut discards PUT events from the watcher.
+func WithFilterPut() OpOption {
+	return func(op *Op) { op.filterPut = true }
+}
+
+// WithFilterDelete discards DELETE events from the watcher.
+func WithFilterDelete() OpOption {
+	return func(op *Op) { op.filterDelete = true }
+}
+
+// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
+// nothing will be returned.
+func WithPrevKV() OpOption {
+	return func(op *Op) {
+		op.prevKV = true
+	}
+}
+
+// WithFragment to receive raw watch response with fragmentation.
+// Fragmentation is disabled by default. If fragmentation is enabled,
+// etcd watch server will split watch response before sending to clients
+// when the total size of watch events exceeds the server-side request limit.
+// The default server-side request limit is 1.5 MiB, which can be configured
+// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes.
+// See "etcdserver/api/v3rpc/watch.go" for more details.
+func WithFragment() OpOption {
+	return func(op *Op) { op.fragment = true }
+}
+
+// WithIgnoreValue updates the key using its current value.
+// This option cannot be combined with non-empty values.
+// Returns an error if the key does not exist.
+func WithIgnoreValue() OpOption {
+	return func(op *Op) {
+		op.ignoreValue = true
+	}
+}
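A hedged sketch of composing the options above; the endpoint and key prefix are placeholders. It fetches the most recently modified key under a prefix, mirroring what withTop composes (WithPrefix + WithSort + WithLimit(1)):

```go
package main

import (
	"context"
	"fmt"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Newest key under "jobs/": sort by mod revision, descending, limit 1.
	resp, err := cli.Get(context.Background(), "jobs/",
		clientv3.WithPrefix(),
		clientv3.WithSort(clientv3.SortByModRevision, clientv3.SortDescend),
		clientv3.WithLimit(1),
	)
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
}
```

+
+// WithIgnoreLease updates the key using its current lease.
+// This option cannot be combined with WithLease.
+// Returns an error if the key does not exist.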
+func WithIgnoreLease() OpOption {
+	return func(op *Op) {
+		op.ignoreLease = true
+	}
+}
+
+// LeaseOp represents an Operation that lease can execute.
+type LeaseOp struct {
+	id LeaseID
+
+	// for TimeToLive
+	attachedKeys bool
+}
+
+// LeaseOption configures lease operations.
+type LeaseOption func(*LeaseOp)
+
+func (op *LeaseOp) applyOpts(opts []LeaseOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
+func WithAttachedKeys() LeaseOption {
+	return func(op *LeaseOp) { op.attachedKeys = true }
+}
+
+func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
+	ret := &LeaseOp{id: id}
+	ret.applyOpts(opts)
+	return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
+}
+
+// IsOptsWithPrefix returns true if WithPrefix option is called in the given opts.
+func IsOptsWithPrefix(opts []OpOption) bool {
+	ret := NewOp()
+	for _, opt := range opts {
+		opt(ret)
+	}
+
+	return ret.isOptsWithPrefix
+}
+
+// IsOptsWithFromKey returns true if WithFromKey option is called in the given opts.
+func IsOptsWithFromKey(opts []OpOption) bool {
+	ret := NewOp()
+	for _, opt := range opts {
+		opt(ret)
+	}
+
+	return ret.isOptsWithFromKey
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/options.go b/vendor/go.etcd.io/etcd/client/v3/options.go
new file mode 100644
index 0000000000..cdae1b16a2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/options.go
@@ -0,0 +1,69 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"math"
+	"time"
+
+	"google.golang.org/grpc"
+)
+
+var (
+	// Client-side retry handling of request failures where data was not written to the wire or
+	// where the server indicates it did not process the data. The gRPC default is "WaitForReady(false)",
+	// but for etcd we default to "WaitForReady(true)" to minimize client request error responses due to
+	// transient failures.
+	defaultWaitForReady = grpc.WaitForReady(true)
+
+	// client-side request send limit, gRPC default is math.MaxInt32
+	// Make sure that "client-side send limit < server-side default send/recv limit"
+	// Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
+	defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
+
+	// client-side response receive limit, gRPC default is 4MB
+	// Make sure that "client-side receive limit >= server-side default send/recv limit"
+	// because range response can easily exceed request send limits
+	// Default to math.MaxInt32; writes exceeding the server-side send limit fail anyway
+	defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
+
+	// client-side non-streaming retry limit, only applied to requests where the server responds with
+	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
+	// If set to 0, retry is disabled.
+	defaultUnaryMaxRetries uint = 100
+
+	// client-side streaming retry limit, only applied to requests where the server responds with
+	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
+	// If set to 0, retry is disabled.
+	defaultStreamMaxRetries = ^uint(0) // max uint
+
+	// client-side retry backoff wait between requests.
+	defaultBackoffWaitBetween = 25 * time.Millisecond
+
+	// client-side retry backoff default jitter fraction.
+	defaultBackoffJitterFraction = 0.10
+)
+
+// defaultCallOpts defines a list of default "gRPC.CallOption".
+// Some options are exposed to "clientv3.Config".
+// Defaults will be overridden by the settings in "clientv3.Config".
+var defaultCallOpts = []grpc.CallOption{
+	defaultWaitForReady,
+	defaultMaxCallSendMsgSize,
+	defaultMaxCallRecvMsgSize,
+}
+
+// MaxLeaseTTL is the maximum lease TTL value.
+const MaxLeaseTTL = 9000000000
diff --git a/vendor/go.etcd.io/etcd/client/v3/retry.go b/vendor/go.etcd.io/etcd/client/v3/retry.go
new file mode 100644
index 0000000000..69ecc63147
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/retry.go
@@ -0,0 +1,306 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type retryPolicy uint8
+
+const (
+	repeatable retryPolicy = iota
+	nonRepeatable
+)
+
+func (rp retryPolicy) String() string {
+	switch rp {
+	case repeatable:
+		return "repeatable"
+	case nonRepeatable:
+		return "nonRepeatable"
+	default:
+		return "UNKNOWN"
+	}
+}
+
+// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
+//
+// immutable requests (e.g. Get) should be retried unless it's
+// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
+//
+// Returning "false" means retry should stop, since the client cannot
+// recover by itself even with retries.
+func isSafeRetryImmutableRPC(err error) bool {
+	eErr := rpctypes.Error(err)
+	if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
+		// interrupted by non-transient server-side or gRPC-side error
+		// client cannot handle itself (e.g. rpctypes.ErrCompacted)
+		return false
+	}
+	// only retry if unavailable
+	ev, ok := status.FromError(err)
+	if !ok {
+		// all errors from RPCs are typed "grpc/status.(*statusError)"
+		// (ref. https://github.com/grpc/grpc-go/pull/1782)
+		//
+		// if the error type is not "grpc/status.(*statusError)",
+		// it could be from "Dial"
+		// TODO: do not retry for now
+		// ref. https://github.com/grpc/grpc-go/issues/1581
+		return false
+	}
+	return ev.Code() == codes.Unavailable
+}
+
+// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry.
+//
+// mutable requests (e.g. Put, Delete, Txn) should only be retried
+// when the status code is codes.Unavailable and the initial connection
+// has not been established (no endpoint is up).
+//
+// Returning "false" means retry should stop, otherwise it violates
+// write-at-most-once semantics.
+func isSafeRetryMutableRPC(err error) bool {
+	if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
+		// not safe for mutable RPCs
+		// e.g. interrupted by non-transient error that client cannot handle itself,
+		// or transient error while the connection has already been established
+		return false
+	}
+	desc := rpctypes.ErrorDesc(err)
+	return desc == "there is no address available" || desc == "there is no connection available"
+}
+
+type retryKVClient struct {
+	kc pb.KVClient
+}
+
+// RetryKVClient implements a KVClient.
+func RetryKVClient(c *Client) pb.KVClient {
+	return &retryKVClient{
+		kc: pb.NewKVClient(c.conn),
+	}
+}
+
+func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
+	return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
+	return rkv.kc.Put(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
+	return rkv.kc.DeleteRange(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
+	return rkv.kc.Txn(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
+	return rkv.kc.Compact(ctx, in, opts...)
+}
+
+type retryLeaseClient struct {
+	lc pb.LeaseClient
+}
+
+// RetryLeaseClient implements a LeaseClient.
+func RetryLeaseClient(c *Client) pb.LeaseClient {
+	return &retryLeaseClient{
+		lc: pb.NewLeaseClient(c.conn),
+	}
+}
+
+func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
+	return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
+	return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
+	return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
+	return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
+	return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...)
+}
+
+type retryClusterClient struct {
+	cc pb.ClusterClient
+}
+
+// RetryClusterClient implements a ClusterClient.
+func RetryClusterClient(c *Client) pb.ClusterClient {
+	return &retryClusterClient{
+		cc: pb.NewClusterClient(c.conn),
+	}
+}
+
+func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
+	return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
+	return rcc.cc.MemberAdd(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
+	return rcc.cc.MemberRemove(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
+	return rcc.cc.MemberUpdate(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberPromote(ctx context.Context, in *pb.MemberPromoteRequest, opts ...grpc.CallOption) (resp *pb.MemberPromoteResponse, err error) {
+	return rcc.cc.MemberPromote(ctx, in, opts...)
+}
+
+type retryMaintenanceClient struct {
+	mc pb.MaintenanceClient
+}
+
+// RetryMaintenanceClient implements a MaintenanceClient.
+func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
+	return &retryMaintenanceClient{
+		mc: pb.NewMaintenanceClient(conn),
+	}
+}
+
+func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
+	return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
+	return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
+	return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
+	return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
+	return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
+	return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
+	return rmc.mc.Defragment(ctx, in, opts...)
+}
+
+func (rmc *retryMaintenanceClient) Downgrade(ctx context.Context, in *pb.DowngradeRequest, opts ...grpc.CallOption) (resp *pb.DowngradeResponse, err error) {
+	return rmc.mc.Downgrade(ctx, in, opts...)
+}
+
+type retryAuthClient struct {
+	ac pb.AuthClient
+}
+
+// RetryAuthClient implements an AuthClient.
+func RetryAuthClient(c *Client) pb.AuthClient { + return &retryAuthClient{ + ac: pb.NewAuthClient(c.conn), + } +} + +func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { + return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { + return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { + return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { + return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { + return rac.ac.AuthEnable(ctx, in, opts...) +} + +func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { + return rac.ac.AuthDisable(ctx, in, opts...) +} + +func (rac *retryAuthClient) AuthStatus(ctx context.Context, in *pb.AuthStatusRequest, opts ...grpc.CallOption) (resp *pb.AuthStatusResponse, err error) { + return rac.ac.AuthStatus(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { + return rac.ac.UserAdd(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { + return rac.ac.UserDelete(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { + return rac.ac.UserChangePassword(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { + return rac.ac.UserGrantRole(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { + return rac.ac.UserRevokeRole(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { + return rac.ac.RoleAdd(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { + return rac.ac.RoleDelete(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { + return rac.ac.RoleGrantPermission(ctx, in, opts...) 
+}
+
+func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
+	return rac.ac.RoleRevokePermission(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
+	return rac.ac.Authenticate(ctx, in, opts...)
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go
new file mode 100644
index 0000000000..7dc5ddae0f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go
@@ -0,0 +1,433 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
+// fine-grained error checking required by write-at-most-once retry semantics of etcd.
+
+package clientv3
+
+import (
+	"context"
+	"io"
+	"sync"
+	"time"
+
+	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// unaryClientInterceptor returns a new retrying unary client interceptor.
+//
+// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
+// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
+func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClientInterceptor {
+	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
+	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+		ctx = withVersion(ctx)
+		grpcOpts, retryOpts := filterCallOptions(opts)
+		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
+		// short-circuit for simplicity, and to avoid allocations.
+		if callOpts.max == 0 {
+			return invoker(ctx, method, req, reply, cc, grpcOpts...)
+		}
+		var lastErr error
+		for attempt := uint(0); attempt < callOpts.max; attempt++ {
+			if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
+				return err
+			}
+			c.GetLogger().Debug(
+				"retrying of unary invoker",
+				zap.String("target", cc.Target()),
+				zap.Uint("attempt", attempt),
+			)
+			lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
+			if lastErr == nil {
+				return nil
+			}
+			c.GetLogger().Warn(
+				"retrying of unary invoker failed",
+				zap.String("target", cc.Target()),
+				zap.Uint("attempt", attempt),
+				zap.Error(lastErr),
+			)
+			if isContextError(lastErr) {
+				if ctx.Err() != nil {
+					// it's the context deadline or cancellation.
+					return lastErr
+				}
+				// it's the callCtx deadline or cancellation, in which case try again.
+ continue + } + if c.shouldRefreshToken(lastErr, callOpts) { + gterr := c.refreshToken(ctx) + if gterr != nil { + c.GetLogger().Warn( + "retrying of unary invoker failed to fetch new auth token", + zap.String("target", cc.Target()), + zap.Error(gterr), + ) + return gterr // lastErr must be invalid auth token + } + continue + } + if !isSafeRetry(c.lg, lastErr, callOpts) { + return lastErr + } + } + return lastErr + } +} + +// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +// +// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs +// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams, +// BidiStreams), the retry interceptor will fail the call. +func (c *Client) streamClientInterceptor(optFuncs ...retryOption) grpc.StreamClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + ctx = withVersion(ctx) + // getToken automatically + // TODO(cfc4n): keep this code block, remove codes about getToken in client.go after pr #12165 merged. + if c.authTokenBundle != nil { + // equal to c.Username != "" && c.Password != "" + err := c.getToken(ctx) + if err != nil && rpctypes.Error(err) != rpctypes.ErrAuthNotEnabled { + c.GetLogger().Error("clientv3/retry_interceptor: getToken failed", zap.Error(err)) + return nil, err + } + } + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short circuit for simplicity, and avoiding allocations. + if callOpts.max == 0 { + return streamer(ctx, desc, cc, method, grpcOpts...) + } + if desc.ClientStreams { + return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()") + } + newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...) + if err != nil { + c.GetLogger().Error("streamer failed to create ClientStream", zap.Error(err)) + return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable? + } + retryingStreamer := &serverStreamingRetryingStream{ + client: c, + ClientStream: newStreamer, + callOpts: callOpts, + ctx: ctx, + streamerCall: func(ctx context.Context) (grpc.ClientStream, error) { + return streamer(ctx, desc, cc, method, grpcOpts...) + }, + } + return retryingStreamer, nil + } +} + +// shouldRefreshToken checks whether there's a need to refresh the token based on the error and callOptions, +// and returns a boolean value. 
+func (c *Client) shouldRefreshToken(err error, callOpts *options) bool {
+	if rpctypes.Error(err) == rpctypes.ErrUserEmpty {
+		// refresh the token when username, password is present but the server returns ErrUserEmpty
+		// which is possible when the client token is cleared somehow
+		return c.authTokenBundle != nil // equal to c.Username != "" && c.Password != ""
+	}
+
+	return callOpts.retryAuth &&
+		(rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken || rpctypes.Error(err) == rpctypes.ErrAuthOldRevision)
+}
+
+func (c *Client) refreshToken(ctx context.Context) error {
+	if c.authTokenBundle == nil {
+		// c.authTokenBundle will be initialized only when
+		// c.Username != "" && c.Password != "".
+		//
+		// When users use the TLS CommonName based authentication, the
+		// authTokenBundle is always nil. But it's possible for the clients
+		// to get `rpctypes.ErrAuthOldRevision` response when the clients
+		// concurrently modify auth data (e.g., addUser, deleteUser etc.).
+		// In this case, there is no need to refresh the token; instead the
+		// clients just need to retry the operations (e.g. Put, Delete etc).
+		return nil
+	}
+	// clear auth token before refreshing it.
+	c.authTokenBundle.UpdateAuthToken("")
+	return c.getToken(ctx)
+}
+
+// serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a
+// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
+// a new ClientStream according to the retry policy.
+type serverStreamingRetryingStream struct {
+	grpc.ClientStream
+	client        *Client
+	bufferedSends []interface{} // messages sent so far, buffered for resend on retry
+	receivedGood  bool          // indicates whether any prior receives were successful
+	wasClosedSend bool          // indicates that CloseSend was called
+	ctx           context.Context
+	callOpts      *options
+	streamerCall  func(ctx context.Context) (grpc.ClientStream, error)
+	mu            sync.RWMutex
+}
+
+func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
+	s.mu.Lock()
+	s.ClientStream = clientStream
+	s.mu.Unlock()
+}
+
+func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.ClientStream
+}
+
+func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
+	s.mu.Lock()
+	s.bufferedSends = append(s.bufferedSends, m)
+	s.mu.Unlock()
+	return s.getStream().SendMsg(m)
+}
+
+func (s *serverStreamingRetryingStream) CloseSend() error {
+	s.mu.Lock()
+	s.wasClosedSend = true
+	s.mu.Unlock()
+	return s.getStream().CloseSend()
+}
+
+func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
+	return s.getStream().Header()
+}
+
+func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
+	return s.getStream().Trailer()
+}
+
+func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
+	attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
+	if !attemptRetry {
+		return lastErr // success or hard failure
+	}
+
+	// We start off from attempt 1, because zeroth was already made on normal SendMsg().
+	for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
+		if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
+			return err
+		}
+		newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
+		if err != nil {
+			s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err))
+			return err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
+		}
+		s.setStream(newStream)
+
+		s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr))
+		attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
+		if !attemptRetry {
+			return lastErr
+		}
+	}
+	return lastErr
+}
+
+func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
+	s.mu.RLock()
+	wasGood := s.receivedGood
+	s.mu.RUnlock()
+	err := s.getStream().RecvMsg(m)
+	if err == nil || err == io.EOF {
+		s.mu.Lock()
+		s.receivedGood = true
+		s.mu.Unlock()
+		return false, err
+	} else if wasGood {
+		// a previous RecvMsg in the stream succeeded; no retry logic should interfere
+		return false, err
+	}
+	if isContextError(err) {
+		if s.ctx.Err() != nil {
+			return false, err
+		}
+		// it's the callCtx deadline or cancellation, in which case try again.
+		return true, err
+	}
+	if s.client.shouldRefreshToken(err, s.callOpts) {
+		gterr := s.client.refreshToken(s.ctx)
+		if gterr != nil {
+			s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
+			return false, err // return the original error for simplicity
+		}
+		return true, err
+	}
+	return isSafeRetry(s.client.lg, err, s.callOpts), err
+}
+
+func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
+	s.mu.RLock()
+	bufferedSends := s.bufferedSends
+	s.mu.RUnlock()
+	newStream, err := s.streamerCall(callCtx)
+	if err != nil {
+		return nil, err
+	}
+	for _, msg := range bufferedSends {
+		if err := newStream.SendMsg(msg); err != nil {
+			return nil, err
+		}
+	}
+	if err := newStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return newStream, nil
+}
+
+func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
+	waitTime := time.Duration(0)
+	if attempt > 0 {
+		waitTime = callOpts.backoffFunc(attempt)
+	}
+	if waitTime > 0 {
+		timer := time.NewTimer(waitTime)
+		select {
+		case <-ctx.Done():
+			timer.Stop()
+			return contextErrToGrpcErr(ctx.Err())
+		case <-timer.C:
+		}
+	}
+	return nil
+}
+
+// isSafeRetry returns true if the request is safe to retry with the given error.
+func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool {
+	if isContextError(err) {
+		return false
+	}
+	switch callOpts.retryPolicy {
+	case repeatable:
+		return isSafeRetryImmutableRPC(err)
+	case nonRepeatable:
+		return isSafeRetryMutableRPC(err)
+	default:
+		lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
+		return false
+	}
+}
+
+func isContextError(err error) bool {
+	return status.Code(err) == codes.DeadlineExceeded || status.Code(err) == codes.Canceled
+}
+
+func contextErrToGrpcErr(err error) error {
+	switch err {
+	case context.DeadlineExceeded:
+		return status.Errorf(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Errorf(codes.Canceled, err.Error())
+	default:
+		return status.Errorf(codes.Unknown, err.Error())
+	}
+}
+
+var (
+	defaultOptions = &options{
+		retryPolicy: nonRepeatable,
+		max:         0, // disable
+		backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
+		retryAuth:   true,
+	}
+)
+// backoffFunc denotes a family of functions that control the backoff duration between call retries.
+//
+// They are called with an identifier of the attempt and should return the time the client should
+// hold off for. If the returned time is longer than the `context.Context.Deadline` of the request,
+// the deadline of the request takes precedence and the wait will be interrupted before proceeding
+// with the next iteration.
+type backoffFunc func(attempt uint) time.Duration
+
+// withRetryPolicy sets the retry policy of this call.
+func withRetryPolicy(rp retryPolicy) retryOption {
+	return retryOption{applyFunc: func(o *options) {
+		o.retryPolicy = rp
+	}}
+}
+
+// withMax sets the maximum number of retries on this call, or this interceptor.
+func withMax(maxRetries uint) retryOption {
+	return retryOption{applyFunc: func(o *options) {
+		o.max = maxRetries
+	}}
+}
+
+// withBackoff sets the backoffFunc used to control the time between retries.
+func withBackoff(bf backoffFunc) retryOption {
+	return retryOption{applyFunc: func(o *options) {
+		o.backoffFunc = bf
+	}}
+}
+
+type options struct {
+	retryPolicy retryPolicy
+	max         uint
+	backoffFunc backoffFunc
+	retryAuth   bool
+}
+
+// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor.
+type retryOption struct {
+	grpc.EmptyCallOption // make sure we implement the private after() and before() methods so we don't panic.
+	applyFunc            func(opt *options)
+}
+
+func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options {
+	if len(retryOptions) == 0 {
+		return opt
+	}
+	optCopy := &options{}
+	*optCopy = *opt
+	for _, f := range retryOptions {
+		f.applyFunc(optCopy)
+	}
+	return optCopy
+}
+
+func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) {
+	for _, opt := range callOptions {
+		if co, ok := opt.(retryOption); ok {
+			retryOptions = append(retryOptions, co)
+		} else {
+			grpcOptions = append(grpcOptions, opt)
+		}
+	}
+	return grpcOptions, retryOptions
+}
+
+// backoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
+//
+// For example, waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
+func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc {
+	return func(attempt uint) time.Duration {
+		return jitterUp(waitBetween, jitterFraction)
+	}
+}
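Taken together, these helpers form a small options pipeline: `filterCallOptions` splits retry options out of the plain gRPC call options, and `reuseOrNewWithCallOptions` overlays them on the defaults. A hedged sketch of how they compose (all of these names are unexported and internal to clientv3; shown for illustration only):

```go
// Illustrative only: composing the package-internal retry options.
opts := reuseOrNewWithCallOptions(defaultOptions, []retryOption{
	withMax(3),                  // allow up to 3 attempts
	withRetryPolicy(repeatable), // only safe for idempotent RPCs
	withBackoff(backoffLinearWithJitter(50*time.Millisecond, 0.10)),
})
// With jitter 0.10, each retry waits a uniformly random duration in [45ms, 55ms].
_ = opts
```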
diff --git a/vendor/go.etcd.io/etcd/client/v3/sort.go b/vendor/go.etcd.io/etcd/client/v3/sort.go
new file mode 100644
index 0000000000..2bb9d9a13b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/sort.go
@@ -0,0 +1,37 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+type SortTarget int
+type SortOrder int
+
+const (
+	SortNone SortOrder = iota
+	SortAscend
+	SortDescend
+)
+
+const (
+	SortByKey SortTarget = iota
+	SortByVersion
+	SortByCreateRevision
+	SortByModRevision
+	SortByValue
+)
+
+type SortOption struct {
+	Target SortTarget
+	Order  SortOrder
+}
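These sort options are consumed by clientv3's public `WithSort` operation option. A hedged usage sketch (assumes an existing `cli *clientv3.Client` and `ctx`; the key prefix is illustrative):

```go
// List keys under a prefix in ascending key order.
resp, err := cli.Get(ctx, "users/",
	clientv3.WithPrefix(),
	clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend),
)
if err != nil {
	log.Fatal(err)
}
for _, kv := range resp.Kvs {
	fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
}
```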
diff --git a/vendor/go.etcd.io/etcd/client/v3/txn.go b/vendor/go.etcd.io/etcd/client/v3/txn.go
new file mode 100644
index 0000000000..3f6a953cf0
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/txn.go
@@ -0,0 +1,150 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"sync"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+
+	"google.golang.org/grpc"
+)
+
+// Txn is the interface that wraps mini-transactions.
+//
+//	Txn(context.TODO()).If(
+//	 Compare(Value(k1), ">", v1),
+//	 Compare(Version(k1), "=", 2)
+//	).Then(
+//	 OpPut(k2,v2), OpPut(k3,v3)
+//	).Else(
+//	 OpPut(k4,v4), OpPut(k5,v5)
+//	).Commit()
+type Txn interface {
+	// If takes a list of comparisons. If all comparisons passed in succeed,
+	// the operations passed into Then() will be executed. Otherwise, the operations
+	// passed into Else() will be executed.
+	If(cs ...Cmp) Txn
+
+	// Then takes a list of operations. The Ops list will be executed if the
+	// comparisons passed in If() succeed.
+	Then(ops ...Op) Txn
+
+	// Else takes a list of operations. The Ops list will be executed if the
+	// comparisons passed in If() fail.
+	Else(ops ...Op) Txn
+
+	// Commit tries to commit the transaction.
+	Commit() (*TxnResponse, error)
+}
+
+type txn struct {
+	kv  *kv
+	ctx context.Context
+
+	mu    sync.Mutex
+	cif   bool
+	cthen bool
+	celse bool
+
+	isWrite bool
+
+	cmps []*pb.Compare
+
+	sus []*pb.RequestOp
+	fas []*pb.RequestOp
+
+	callOpts []grpc.CallOption
+}
+
+func (txn *txn) If(cs ...Cmp) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.cif {
+		panic("cannot call If twice!")
+	}
+
+	if txn.cthen {
+		panic("cannot call If after Then!")
+	}
+
+	if txn.celse {
+		panic("cannot call If after Else!")
+	}
+
+	txn.cif = true
+
+	for i := range cs {
+		txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
+	}
+
+	return txn
+}
+
+func (txn *txn) Then(ops ...Op) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.cthen {
+		panic("cannot call Then twice!")
+	}
+	if txn.celse {
+		panic("cannot call Then after Else!")
+	}
+
+	txn.cthen = true
+
+	for _, op := range ops {
+		txn.isWrite = txn.isWrite || op.isWrite()
+		txn.sus = append(txn.sus, op.toRequestOp())
+	}
+
+	return txn
+}
+
+func (txn *txn) Else(ops ...Op) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.celse {
+		panic("cannot call Else twice!")
+	}
+
+	txn.celse = true
+
+	for _, op := range ops {
+		txn.isWrite = txn.isWrite || op.isWrite()
+		txn.fas = append(txn.fas, op.toRequestOp())
+	}
+
+	return txn
+}
+
+func (txn *txn) Commit() (*TxnResponse, error) {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
+
+	var resp *pb.TxnResponse
+	var err error
+	resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
+	if err != nil {
+		return nil, toErr(txn.ctx, err)
+	}
+	return (*TxnResponse)(resp), nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/utils.go b/vendor/go.etcd.io/etcd/client/v3/utils.go
new file mode 100644
index 0000000000..850275877d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/utils.go
@@ -0,0 +1,31 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"math/rand"
+	"time"
+)
+
+// jitterUp adds random jitter to the duration.
+//
+// This adds or subtracts time from the duration within a given jitter fraction.
+// For example, for 10s and jitter 0.1, it will return a time within [9s, 11s].
+//
+// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
+func jitterUp(duration time.Duration, jitter float64) time.Duration {
+	multiplier := jitter * (rand.Float64()*2 - 1)
+	return time.Duration(float64(duration) * (1 + multiplier))
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/watch.go b/vendor/go.etcd.io/etcd/client/v3/watch.go
new file mode 100644
index 0000000000..41a6ec9763
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/watch.go
@@ -0,0 +1,1042 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/api/v3/mvccpb"
+	v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	EventTypeDelete = mvccpb.DELETE
+	EventTypePut    = mvccpb.PUT
+
+	closeSendErrTimeout = 250 * time.Millisecond
+
+	// AutoWatchID is the watcher ID passed in WatchStream.Watch when no
+	// user-provided ID is available. If passed, an ID will automatically be assigned.
+	AutoWatchID = 0
+
+	// InvalidWatchID represents an invalid watch ID and prevents duplication with an existing watch.
+	InvalidWatchID = -1
+)
+
+type Event mvccpb.Event
+
+type WatchChan <-chan WatchResponse
+
+type Watcher interface {
+	// Watch watches on a key or prefix. The watched events will be returned
+	// through the returned channel. If revisions waiting to be sent over the
+	// watch are compacted, then the watch will be canceled by the server, the
+	// client will post a compacted error watch response, and the channel will close.
+	// If the requested revision is 0 or unspecified, the returned channel will
+	// return watch events that happen after the server receives the watch request.
+	// If the context "ctx" is canceled or timed out, the returned "WatchChan" is closed,
+	// and "WatchResponse" from this closed channel has zero events and a nil "Err()".
+	// The context "ctx" MUST be canceled as soon as the watcher is no longer used,
+	// to release the associated resources.
+	//
+	// If the context is "context.Background/TODO", the returned "WatchChan" will
+	// not be closed and will block until an event is triggered, except when the server
+	// returns a non-recoverable error (e.g. ErrCompacted).
+	// For example, when the context is passed with "WithRequireLeader" and the
+	// connected server has no leader (e.g. due to network partition),
+	// error "etcdserver: no leader" (ErrNoLeader) will be returned,
+	// and then "WatchChan" is closed with a non-nil "Err()".
+	// In order to prevent a watch stream from being stuck on a partitioned node,
+	// make sure to wrap the context with "WithRequireLeader".
+	//
+	// Otherwise, as long as the context has not been canceled or timed out,
+	// watch will retry on other recoverable errors forever until reconnected.
+	//
+	// TODO: explicitly set context error in the last "WatchResponse" message and close channel?
+	// Currently, client contexts are overwritten with "valCtx" that never closes.
+	// TODO(v3.4): configure watch retry policy, limit maximum retry number
+	// (see https://github.com/etcd-io/etcd/issues/8980)
+	Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
+
+	// RequestProgress requests a progress notify response be sent in all watch channels.
+	RequestProgress(ctx context.Context) error
+
+	// Close closes the watcher and cancels all watch requests.
+	Close() error
+}
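A hedged usage sketch of the contract documented above (it assumes an existing `cli *clientv3.Client`; the key prefix is illustrative):

```go
// Wrap the context with WithRequireLeader so a partitioned server closes
// the channel (with a non-nil Err) instead of silently stalling the watch.
ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.Background()))
defer cancel() // always cancel to release the watcher's resources

for wresp := range cli.Watch(ctx, "config/", clientv3.WithPrefix()) {
	if err := wresp.Err(); err != nil {
		log.Printf("watch error: %v", err) // e.g. compaction or lost leader
		break
	}
	for _, ev := range wresp.Events {
		log.Printf("%s %q = %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
	}
}
```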
+type WatchResponse struct {
+	Header pb.ResponseHeader
+	Events []*Event
+
+	// CompactRevision is the minimum revision the watcher may receive.
+	CompactRevision int64
+
+	// Canceled is used to indicate watch failure.
+	// If the watch failed and the stream was about to close, before the channel is closed,
+	// the channel sends a final response that has Canceled set to true with a non-nil Err().
+	Canceled bool
+
+	// Created is used to indicate the creation of the watcher.
+	Created bool
+
+	closeErr error
+
+	// cancelReason is the reason for canceling the watch
+	cancelReason string
+}
+
+// IsCreate returns true if the event tells that the key is newly created.
+func (e *Event) IsCreate() bool {
+	return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
+}
+
+// IsModify returns true if the event tells that a new value is put on an existing key.
+func (e *Event) IsModify() bool {
+	return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
+}
+
+// Err is the error value if this WatchResponse holds an error.
+func (wr *WatchResponse) Err() error {
+	switch {
+	case wr.closeErr != nil:
+		return v3rpc.Error(wr.closeErr)
+	case wr.CompactRevision != 0:
+		return v3rpc.ErrCompacted
+	case wr.Canceled:
+		if len(wr.cancelReason) != 0 {
+			return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
+		}
+		return v3rpc.ErrFutureRev
+	}
+	return nil
+}
+
+// IsProgressNotify returns true if the WatchResponse is a progress notification.
+func (wr *WatchResponse) IsProgressNotify() bool {
+	return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
+}
+
+// watcher implements the Watcher interface
+type watcher struct {
+	remote   pb.WatchClient
+	callOpts []grpc.CallOption
+
+	// mu protects the grpc streams map
+	mu sync.Mutex
+
+	// streams holds all the active grpc streams keyed by ctx value.
+	streams map[string]*watchGrpcStream
+	lg      *zap.Logger
+}
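To make the semantics above concrete, here is a hedged sketch of how a consumer might classify a received WatchResponse; the ordering mirrors the Err() logic above and is illustrative, not prescriptive:

```go
// wr is a WatchResponse received from a WatchChan.
switch {
case wr.Created:
	// watcher registration confirmed; no events yet
case wr.IsProgressNotify():
	// heartbeat only: no events, but Header.Revision is up to date
case wr.CompactRevision != 0:
	// requested history was compacted; resync from wr.CompactRevision
case wr.Canceled:
	// server canceled the watch; wr.Err() explains why
default:
	// a normal batch of events in wr.Events
}
```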
+// watchGrpcStream tracks all watch resources attached to a single grpc stream.
+type watchGrpcStream struct {
+	owner    *watcher
+	remote   pb.WatchClient
+	callOpts []grpc.CallOption
+
+	// ctx controls internal remote.Watch requests
+	ctx context.Context
+	// ctxKey is the key used when looking up this stream's context
+	ctxKey string
+	cancel context.CancelFunc
+
+	// substreams holds all active watchers on this grpc stream
+	substreams map[int64]*watcherStream
+	// resuming holds all resuming watchers on this grpc stream
+	resuming []*watcherStream
+
+	// reqc sends a watch request from Watch() to the main goroutine
+	reqc chan watchStreamRequest
+	// respc receives data from the watch client
+	respc chan *pb.WatchResponse
+	// donec closes to broadcast shutdown
+	donec chan struct{}
+	// errc transmits errors from grpc Recv to the watch stream reconnect logic
+	errc chan error
+	// closingc gets the watcherStream of closing watchers
+	closingc chan *watcherStream
+	// wg is Done when all substream goroutines have exited
+	wg sync.WaitGroup
+
+	// resumec closes to signal that all substreams should begin resuming
+	resumec chan struct{}
+	// closeErr is the error that closed the watch stream
+	closeErr error
+
+	lg *zap.Logger
+}
+
+// watchStreamRequest is a union of the supported watch request operation types
+type watchStreamRequest interface {
+	toPB() *pb.WatchRequest
+}
+
+// watchRequest is issued by the subscriber to start a new watcher
+type watchRequest struct {
+	ctx context.Context
+	key string
+	end string
+	rev int64
+
+	// send created notification event if this field is true
+	createdNotify bool
+	// progressNotify is for progress updates
+	progressNotify bool
+	// fragment, if true, splits watch events when the total size exceeds the
+	// "--max-request-bytes" flag value plus a 512-byte margin; disabled by default
+	fragment bool
+
+	// filters is the list of events to filter out
+	filters []pb.WatchCreateRequest_FilterType
+	// get the previous key-value pair before the event happens
+	prevKV bool
+	// retc receives a chan WatchResponse once the watcher is established
+	retc chan chan WatchResponse
+}
+
+// progressRequest is issued by the subscriber to request watch progress
+type progressRequest struct {
+}
+
+// watcherStream represents a registered watcher
+type watcherStream struct {
+	// initReq is the request that initiated this stream
+	initReq watchRequest
+
+	// outc publishes watch responses to the subscriber
+	outc chan WatchResponse
+	// recvc buffers watch responses before publishing
+	recvc chan *WatchResponse
+	// donec closes when the watcherStream goroutine stops.
+	donec chan struct{}
+	// closing is set to true when the stream should be scheduled to shut down.
+	closing bool
+	// id is the registered watch id on the grpc stream
+	id int64
+
+	// buf holds all events received from etcd but not yet consumed by the client
+	buf []*WatchResponse
+}
+
+func NewWatcher(c *Client) Watcher {
+	return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
+}
+
+func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
+	w := &watcher{
+		remote:  wc,
+		streams: make(map[string]*watchGrpcStream),
+	}
+	if c != nil {
+		w.callOpts = c.callOpts
+		w.lg = c.lg
+	}
+	return w
+}
+
+// never closes
+var valCtxCh = make(chan struct{})
+var zeroTime = time.Unix(0, 0)
+
+// ctx with only the values; never Done
+type valCtx struct{ context.Context }
+
+func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
+func (vc *valCtx) Done() <-chan struct{}       { return valCtxCh }
+func (vc *valCtx) Err() error                  { return nil }
+
+func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
+	ctx, cancel := context.WithCancel(&valCtx{inctx})
+	wgs := &watchGrpcStream{
+		owner:      w,
+		remote:     w.remote,
+		callOpts:   w.callOpts,
+		ctx:        ctx,
+		ctxKey:     streamKeyFromCtx(inctx),
+		cancel:     cancel,
+		substreams: make(map[int64]*watcherStream),
+		respc:      make(chan *pb.WatchResponse),
+		reqc:       make(chan watchStreamRequest),
+		donec:      make(chan struct{}),
+		errc:       make(chan error, 1),
+		closingc:   make(chan *watcherStream),
+		resumec:    make(chan struct{}),
+		lg:         w.lg,
+	}
+	go wgs.run()
+	return wgs
+}
+
+// Watch posts a watch request to run() and waits for a new watcher channel
+func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
+	ow := opWatch(key, opts...)
+
+	var filters []pb.WatchCreateRequest_FilterType
+	if ow.filterPut {
+		filters = append(filters, pb.WatchCreateRequest_NOPUT)
+	}
+	if ow.filterDelete {
+		filters = append(filters, pb.WatchCreateRequest_NODELETE)
+	}
+
+	wr := &watchRequest{
+		ctx:            ctx,
+		createdNotify:  ow.createdNotify,
+		key:            string(ow.key),
+		end:            string(ow.end),
+		rev:            ow.rev,
+		progressNotify: ow.progressNotify,
+		fragment:       ow.fragment,
+		filters:        filters,
+		prevKV:         ow.prevKV,
+		retc:           make(chan chan WatchResponse, 1),
+	}
+
+	ok := false
+	ctxKey := streamKeyFromCtx(ctx)
+
+	var closeCh chan WatchResponse
+	for {
+		// find or allocate appropriate grpc watch stream
+		w.mu.Lock()
+		if w.streams == nil {
+			// closed
+			w.mu.Unlock()
+			ch := make(chan WatchResponse)
+			close(ch)
+			return ch
+		}
+		wgs := w.streams[ctxKey]
+		if wgs == nil {
+			wgs = w.newWatcherGrpcStream(ctx)
+			w.streams[ctxKey] = wgs
+		}
+		donec := wgs.donec
+		reqc := wgs.reqc
+		w.mu.Unlock()
+
+		// couldn't create channel; return closed channel
+		if closeCh == nil {
+			closeCh = make(chan WatchResponse, 1)
+		}
+
+		// submit request
+		select {
+		case reqc <- wr:
+			ok = true
+		case <-wr.ctx.Done():
+			ok = false
+		case <-donec:
+			ok = false
+			if wgs.closeErr != nil {
+				closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
+				break
+			}
+			// retry; may have dropped stream from no ctxs
+			continue
+		}
+
+		// receive channel
+		if ok {
+			select {
+			case ret := <-wr.retc:
+				return ret
+			case <-ctx.Done():
+			case <-donec:
+				if wgs.closeErr != nil {
+					closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
+					break
+				}
+				// retry; may have dropped stream from no ctxs
+				continue
+			}
+		}
+		break
+	}
+
+	close(closeCh)
+	return closeCh
+}
+
+func (w *watcher) Close() (err error) {
+	w.mu.Lock()
+	streams := w.streams
+	w.streams = nil
+	w.mu.Unlock()
+	for _, wgs := range streams {
+		if werr := wgs.close(); werr != nil {
+			err = werr
+		}
+	}
+	// Consider context.Canceled as a successful close
+	if err == context.Canceled {
+		err = nil
+	}
+	return err
+}
+
+// RequestProgress requests a progress notify response be sent in all watch channels.
+func (w *watcher) RequestProgress(ctx context.Context) (err error) {
+	ctxKey := streamKeyFromCtx(ctx)
+
+	w.mu.Lock()
+	if w.streams == nil {
+		w.mu.Unlock()
+		return fmt.Errorf("no stream found for context")
+	}
+	wgs := w.streams[ctxKey]
+	if wgs == nil {
+		wgs = w.newWatcherGrpcStream(ctx)
+		w.streams[ctxKey] = wgs
+	}
+	donec := wgs.donec
+	reqc := wgs.reqc
+	w.mu.Unlock()
+
+	pr := &progressRequest{}
+
+	select {
+	case reqc <- pr:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-donec:
+		if wgs.closeErr != nil {
+			return wgs.closeErr
+		}
+		// retry; may have dropped stream from no ctxs
+		return w.RequestProgress(ctx)
+	}
+}
+
+func (w *watchGrpcStream) close() (err error) {
+	w.cancel()
+	<-w.donec
+	select {
+	case err = <-w.errc:
+	default:
+	}
+	return toErr(w.ctx, err)
+}
+
+func (w *watcher) closeStream(wgs *watchGrpcStream) {
+	w.mu.Lock()
+	close(wgs.donec)
+	wgs.cancel()
+	if w.streams != nil {
+		delete(w.streams, wgs.ctxKey)
+	}
+	w.mu.Unlock()
+}
+
+func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
+	// check watch ID for backward compatibility (<= v3.3)
+	if resp.WatchId == InvalidWatchID || (resp.Canceled && resp.CancelReason != "") {
+		w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
+		// failed; no channel
+		close(ws.recvc)
+		return
+	}
+	ws.id = resp.WatchId
+	w.substreams[ws.id] = ws
+}
+
+func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
+	select {
+	case ws.outc <- *resp:
+	case <-ws.initReq.ctx.Done():
+	case <-time.After(closeSendErrTimeout):
+	}
+	close(ws.outc)
+}
+
+func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
+	// send channel response in case stream was never established
+	select {
+	case ws.initReq.retc <- ws.outc:
+	default:
+	}
+	// close subscriber's channel
+	if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
+		go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr})
+	} else if ws.outc != nil {
+		close(ws.outc)
+	}
+	if ws.id != InvalidWatchID {
+		delete(w.substreams, ws.id)
+		return
+	}
+	for i := range w.resuming {
+		if w.resuming[i] == ws {
+			w.resuming[i] = nil
+			return
+		}
+	}
+}
+
+// run is the root of the goroutines for managing a watcher client
+func (w *watchGrpcStream) run() {
+	var wc pb.Watch_WatchClient
+	var closeErr error
+
+	// substreams marked to close but goroutine still running; needed for
+	// avoiding double-closing recvc on grpc stream teardown
+	closing := make(map[*watcherStream]struct{})
+
+	defer func() {
+		w.closeErr = closeErr
+		// shutdown substreams and resuming substreams
+		for _, ws := range w.substreams {
+			if _, ok := closing[ws]; !ok {
+				close(ws.recvc)
+				closing[ws] = struct{}{}
+			}
+		}
+		for _, ws := range w.resuming {
+			if _, ok := closing[ws]; ws != nil && !ok {
+				close(ws.recvc)
+				closing[ws] = struct{}{}
+			}
+		}
+		w.joinSubstreams()
+		for range closing {
+			w.closeSubstream(<-w.closingc)
+		}
+		w.wg.Wait()
+		w.owner.closeStream(w)
+	}()
+
+	// start a stream with the etcd grpc server
+	if wc, closeErr = w.newWatchClient(); closeErr != nil {
+		return
+	}
+
+	cancelSet := make(map[int64]struct{})
+
+	var cur *pb.WatchResponse
+	backoff := time.Millisecond
+	for {
+		select {
+		// Watch() requested
+		case req := <-w.reqc:
+			switch wreq := req.(type) {
+			case *watchRequest:
+				outc := make(chan WatchResponse, 1)
+				// TODO: pass custom watch ID?
+				ws := &watcherStream{
+					initReq: *wreq,
+					id:      InvalidWatchID,
+					outc:    outc,
+					// unbuffered so resumes won't cause repeat events
+					recvc: make(chan *WatchResponse),
+				}
+
+				ws.donec = make(chan struct{})
+				w.wg.Add(1)
+				go w.serveSubstream(ws, w.resumec)
+
+				// queue up for watcher creation/resume
+				w.resuming = append(w.resuming, ws)
+				if len(w.resuming) == 1 {
+					// head of resume queue, can register a new watcher
+					if err := wc.Send(ws.initReq.toPB()); err != nil {
+						w.lg.Debug("error when sending request", zap.Error(err))
+					}
+				}
+			case *progressRequest:
+				if err := wc.Send(wreq.toPB()); err != nil {
+					w.lg.Debug("error when sending request", zap.Error(err))
+				}
+			}
+
+		// new events from the watch client
+		case pbresp := <-w.respc:
+			if cur == nil || pbresp.Created || pbresp.Canceled {
+				cur = pbresp
+			} else if cur != nil && cur.WatchId == pbresp.WatchId {
+				// merge new events
+				cur.Events = append(cur.Events, pbresp.Events...)
+				// update "Fragment" field; last response with "Fragment" == false
+				cur.Fragment = pbresp.Fragment
+			}
+
+			switch {
+			case pbresp.Created:
+				// response to head of queue creation
+				if len(w.resuming) != 0 {
+					if ws := w.resuming[0]; ws != nil {
+						w.addSubstream(pbresp, ws)
+						w.dispatchEvent(pbresp)
+						w.resuming[0] = nil
+					}
+				}
+
+				if ws := w.nextResume(); ws != nil {
+					if err := wc.Send(ws.initReq.toPB()); err != nil {
+						w.lg.Debug("error when sending request", zap.Error(err))
+					}
+				}
+
+				// reset for next iteration
+				cur = nil
+
+			case pbresp.Canceled && pbresp.CompactRevision == 0:
+				delete(cancelSet, pbresp.WatchId)
+				if ws, ok := w.substreams[pbresp.WatchId]; ok {
+					// signal to stream goroutine to update closingc
+					close(ws.recvc)
+					closing[ws] = struct{}{}
+				}
+
+				// reset for next iteration
+				cur = nil
+
+			case cur.Fragment:
+				// watch response events are still fragmented
+				// continue to fetch next fragmented event arrival
+				continue
+
+			default:
+				// dispatch to appropriate watch stream
+				ok := w.dispatchEvent(cur)
+
+				// reset for next iteration
+				cur = nil
+
+				if ok {
+					break
+				}
+
+				// watch response on unexpected watch id; cancel id
+				if _, ok := cancelSet[pbresp.WatchId]; ok {
+					break
+				}
+
+				cancelSet[pbresp.WatchId] = struct{}{}
+				cr := &pb.WatchRequest_CancelRequest{
+					CancelRequest: &pb.WatchCancelRequest{
+						WatchId: pbresp.WatchId,
+					},
+				}
+				req := &pb.WatchRequest{RequestUnion: cr}
+				w.lg.Debug("sending watch cancel request for failed dispatch", zap.Int64("watch-id", pbresp.WatchId))
+				if err := wc.Send(req); err != nil {
+					w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", pbresp.WatchId), zap.Error(err))
+				}
+			}
+
+		// watch client failed on Recv; spawn another if possible
+		case err := <-w.errc:
+			if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
+				closeErr = err
+				return
+			}
+			backoff = w.backoffIfUnavailable(backoff, err)
+			if wc, closeErr = w.newWatchClient(); closeErr != nil {
+				return
+			}
+			if ws := w.nextResume(); ws != nil {
+				if err := wc.Send(ws.initReq.toPB()); err != nil {
+					w.lg.Debug("error when sending request", zap.Error(err))
+				}
+			}
+			cancelSet = make(map[int64]struct{})
+
+		case <-w.ctx.Done():
+			return
+
+		case ws := <-w.closingc:
+			w.closeSubstream(ws)
+			delete(closing, ws)
+			// no more watchers on this stream, shutdown, skip cancellation
+			if len(w.substreams)+len(w.resuming) == 0 {
+				return
+			}
+			if ws.id != InvalidWatchID {
+				// client is closing an established watch; close it on the server proactively instead of waiting
+				// to close when the next message arrives
+				cancelSet[ws.id] = struct{}{}
+				cr := &pb.WatchRequest_CancelRequest{
+					CancelRequest: &pb.WatchCancelRequest{
+						WatchId: ws.id,
+					},
+				}
+				req := &pb.WatchRequest{RequestUnion: cr}
+				w.lg.Debug("sending watch cancel request for closed watcher", zap.Int64("watch-id", ws.id))
+				if err := wc.Send(req); err != nil {
+					w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", ws.id), zap.Error(err))
+				}
+			}
+		}
+	}
+}
+
+// nextResume chooses the next resuming watcher to register with the grpc stream. Abandoned
+// streams are marked as nil in the queue since the head must wait for its inflight registration.
+func (w *watchGrpcStream) nextResume() *watcherStream {
+	for len(w.resuming) != 0 {
+		if w.resuming[0] != nil {
+			return w.resuming[0]
+		}
+		w.resuming = w.resuming[1:len(w.resuming)]
+	}
+	return nil
+}
+
+// dispatchEvent sends a WatchResponse to the appropriate watcher stream
+func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
+	events := make([]*Event, len(pbresp.Events))
+	for i, ev := range pbresp.Events {
+		events[i] = (*Event)(ev)
+	}
+	// TODO: return watch ID?
+	wr := &WatchResponse{
+		Header:          *pbresp.Header,
+		Events:          events,
+		CompactRevision: pbresp.CompactRevision,
+		Created:         pbresp.Created,
+		Canceled:        pbresp.Canceled,
+		cancelReason:    pbresp.CancelReason,
+	}
+
+	// watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of InvalidWatchID to
+	// indicate they should be broadcast.
+	if wr.IsProgressNotify() && pbresp.WatchId == InvalidWatchID {
+		return w.broadcastResponse(wr)
+	}
+
+	return w.unicastResponse(wr, pbresp.WatchId)
+}
+
+// broadcastResponse sends a watch response to all watch substreams.
+func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
+	for _, ws := range w.substreams {
+		select {
+		case ws.recvc <- wr:
+		case <-ws.donec:
+		}
+	}
+	return true
+}
+
+// unicastResponse sends a watch response to a specific watch substream.
+func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool {
+	ws, ok := w.substreams[watchId]
+	if !ok {
+		return false
+	}
+	select {
+	case ws.recvc <- wr:
+	case <-ws.donec:
+		return false
+	}
+	return true
+}
+
+// serveWatchClient forwards messages from the grpc stream to run()
+func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
+	for {
+		resp, err := wc.Recv()
+		if err != nil {
+			select {
+			case w.errc <- err:
+			case <-w.donec:
+			}
+			return
+		}
+		select {
+		case w.respc <- resp:
+		case <-w.donec:
+			return
+		}
+	}
+}
+
+// serveSubstream forwards watch responses from run() to the subscriber
+func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
+	if ws.closing {
+		panic("created substream goroutine but substream is closing")
+	}
+
+	// nextRev is the minimum expected next revision
+	nextRev := ws.initReq.rev
+	resuming := false
+	defer func() {
+		if !resuming {
+			ws.closing = true
+		}
+		close(ws.donec)
+		if !resuming {
+			w.closingc <- ws
+		}
+		w.wg.Done()
+	}()
+
+	emptyWr := &WatchResponse{}
+	for {
+		curWr := emptyWr
+		outc := ws.outc
+
+		if len(ws.buf) > 0 {
+			curWr = ws.buf[0]
+		} else {
+			outc = nil
+		}
+		select {
+		case outc <- *curWr:
+			if ws.buf[0].Err() != nil {
+				return
+			}
+			ws.buf[0] = nil
+			ws.buf = ws.buf[1:]
+		case wr, ok := <-ws.recvc:
+			if !ok {
+				// shutdown from closeSubstream
+				return
+			}
+
+			if wr.Created {
+				if ws.initReq.retc != nil {
+					ws.initReq.retc <- ws.outc
+					// to prevent next write from taking the slot in buffered channel
+					// and posting duplicate create events
+					ws.initReq.retc = nil
+
+					// send first creation event only if requested
+					if ws.initReq.createdNotify {
+						ws.outc <- *wr
+					}
+					// once the watch channel is returned, a current revision
+					// watch must resume at the store revision. This is necessary
+					// for the following case to work as expected:
+					//	wch := m1.Watch("a")
+					//	m2.Put("a", "b")
+					//	<-wch
+					// If the revision were only bound on the first observed event,
+					// a wch that disconnected before the Put was issued and
+					// reconnected after it was committed would miss the Put.
+					if ws.initReq.rev == 0 {
+						nextRev = wr.Header.Revision
+					}
+				}
+			} else {
+				// current progress of watch; <= store revision
+				nextRev = wr.Header.Revision + 1
+			}
+
+			if len(wr.Events) > 0 {
+				nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
+			}
+			ws.initReq.rev = nextRev
+
+			// created event is already sent above,
+			// watcher should not post duplicate events
+			if wr.Created {
+				continue
+			}
+
+			// TODO pause channel if buffer gets too large
+			ws.buf = append(ws.buf, wr)
+		case <-w.ctx.Done():
+			return
+		case <-ws.initReq.ctx.Done():
+			return
+		case <-resumec:
+			resuming = true
+			return
+		}
+	}
+	// lazily send cancel message if events on missing id
+}
+
+func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
+	// mark all substreams as resuming
+	close(w.resumec)
+	w.resumec = make(chan struct{})
+	w.joinSubstreams()
+	for _, ws := range w.substreams {
+		ws.id = InvalidWatchID
+		w.resuming = append(w.resuming, ws)
+	}
+	// strip out nils, if any
+	var resuming []*watcherStream
+	for _, ws := range w.resuming {
+		if ws != nil {
+			resuming = append(resuming, ws)
+		}
+	}
+	w.resuming = resuming
+	w.substreams = make(map[int64]*watcherStream)
+
+	// connect to grpc stream while accepting watcher cancelation
+	stopc := make(chan struct{})
+	donec := w.waitCancelSubstreams(stopc)
+	wc, err := w.openWatchClient()
+	close(stopc)
+	<-donec
+
+	// serve all non-closing streams, even if there's a client error,
+	// so that the teardown path can shut down the streams as expected.
+	for _, ws := range w.resuming {
+		if ws.closing {
+			continue
+		}
+		ws.donec = make(chan struct{})
+		w.wg.Add(1)
+		go w.serveSubstream(ws, w.resumec)
+	}
+
+	if err != nil {
+		return nil, v3rpc.Error(err)
+	}
+
+	// receive data from the new grpc stream
+	go w.serveWatchClient(wc)
+	return wc, nil
+}
+
+func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
+	var wg sync.WaitGroup
+	wg.Add(len(w.resuming))
+	donec := make(chan struct{})
+	for i := range w.resuming {
+		go func(ws *watcherStream) {
+			defer wg.Done()
+			if ws.closing {
+				if ws.initReq.ctx.Err() != nil && ws.outc != nil {
+					close(ws.outc)
+					ws.outc = nil
+				}
+				return
+			}
+			select {
+			case <-ws.initReq.ctx.Done():
+				// closed ws will be removed from resuming
+				ws.closing = true
+				close(ws.outc)
+				ws.outc = nil
+				w.wg.Add(1)
+				go func() {
+					defer w.wg.Done()
+					w.closingc <- ws
+				}()
+			case <-stopc:
+			}
+		}(w.resuming[i])
+	}
+	go func() {
+		defer close(donec)
+		wg.Wait()
+	}()
+	return donec
+}
+
+// joinSubstreams waits for all substream goroutines to complete.
+func (w *watchGrpcStream) joinSubstreams() {
+	for _, ws := range w.substreams {
+		<-ws.donec
+	}
+	for _, ws := range w.resuming {
+		if ws != nil {
+			<-ws.donec
+		}
+	}
+}
+
+var maxBackoff = 100 * time.Millisecond
+
+func (w *watchGrpcStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration {
+	if isUnavailableErr(w.ctx, err) {
+		// retry, but backoff
+		if backoff < maxBackoff {
+			// 25% backoff factor
+			backoff = backoff + backoff/4
+			if backoff > maxBackoff {
+				backoff = maxBackoff
+			}
+		}
+		time.Sleep(backoff)
+	}
+	return backoff
+}
+
+// openWatchClient retries opening a watch client until success or halt.
+// manually retry in case "ws==nil && err==nil"
+// TODO: remove FailFast=false
+func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
+	backoff := time.Millisecond
+	for {
+		select {
+		case <-w.ctx.Done():
+			if err == nil {
+				return nil, w.ctx.Err()
+			}
+			return nil, err
+		default:
+		}
+		if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
+			break
+		}
+		if isHaltErr(w.ctx, err) {
+			return nil, v3rpc.Error(err)
+		}
+		backoff = w.backoffIfUnavailable(backoff, err)
+	}
+	return ws, nil
+}
+
+// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
+func (wr *watchRequest) toPB() *pb.WatchRequest {
+	req := &pb.WatchCreateRequest{
+		StartRevision:  wr.rev,
+		Key:            []byte(wr.key),
+		RangeEnd:       []byte(wr.end),
+		ProgressNotify: wr.progressNotify,
+		Filters:        wr.filters,
+		PrevKv:         wr.prevKV,
+		Fragment:       wr.fragment,
+	}
+	cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
+	return &pb.WatchRequest{RequestUnion: cr}
+}
+
+// toPB converts an internal progress request structure to its protobuf WatchRequest structure.
+func (pr *progressRequest) toPB() *pb.WatchRequest {
+	req := &pb.WatchProgressRequest{}
+	cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
+	return &pb.WatchRequest{RequestUnion: cr}
+}
+
+func streamKeyFromCtx(ctx context.Context) string {
+	if md, ok := metadata.FromOutgoingContext(ctx); ok {
+		return fmt.Sprintf("%+v", md)
+	}
+	return ""
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/yaml/config.go b/vendor/go.etcd.io/etcd/client/v3/yaml/config.go
new file mode 100644
index 0000000000..2937286d0d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/yaml/config.go
@@ -0,0 +1,91 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml handles yaml-formatted clientv3 configuration data.
+package yaml
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"io/ioutil"
+
+	"sigs.k8s.io/yaml"
+
+	"go.etcd.io/etcd/client/pkg/v3/tlsutil"
+	"go.etcd.io/etcd/client/v3"
+)
+
+type yamlConfig struct {
+	clientv3.Config
+
+	InsecureTransport     bool   `json:"insecure-transport"`
+	InsecureSkipTLSVerify bool   `json:"insecure-skip-tls-verify"`
+	Certfile              string `json:"cert-file"`
+	Keyfile               string `json:"key-file"`
+	TrustedCAfile         string `json:"trusted-ca-file"`
+
+	// CAfile is being deprecated. Use 'TrustedCAfile' instead.
+	// TODO: deprecate this in v4
+	CAfile string `json:"ca-file"`
+}
+
+// NewConfig creates a new clientv3.Config from a yaml file.
+func NewConfig(fpath string) (*clientv3.Config, error) {
+	b, err := ioutil.ReadFile(fpath)
+	if err != nil {
+		return nil, err
+	}
+
+	yc := &yamlConfig{}
+
+	err = yaml.Unmarshal(b, yc)
+	if err != nil {
+		return nil, err
+	}
+
+	if yc.InsecureTransport {
+		return &yc.Config, nil
+	}
+
+	var (
+		cert *tls.Certificate
+		cp   *x509.CertPool
+	)
+
+	if yc.Certfile != "" && yc.Keyfile != "" {
+		cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if yc.TrustedCAfile != "" {
+		cp, err = tlsutil.NewCertPool([]string{yc.TrustedCAfile})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	tlscfg := &tls.Config{
+		MinVersion:         tls.VersionTLS12,
+		InsecureSkipVerify: yc.InsecureSkipTLSVerify,
+		RootCAs:            cp,
+	}
+	if cert != nil {
+		tlscfg.Certificates = []tls.Certificate{*cert}
+	}
+	yc.Config.TLS = tlscfg
+
+	return &yc.Config, nil
+}
diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml
new file mode 100644
index 0000000000..8e5ca7d3e2
--- /dev/null
+++ b/vendor/go.uber.org/zap/.codecov.yml
@@ -0,0 +1,17 @@
+coverage:
+  range: 80..100
+  round: down
+  precision: 2
+
+  status:
+    project:                 # measuring the overall project coverage
+      default:               # context, you can create multiple ones with custom titles
+        enabled: yes         # must be yes|true to enable this status
+        target: 95%          # specify the target coverage for each commit status
+                             # option: "auto" (must increase from parent commit or pull request base)
+                             # option: "X%" a static target percentage to hit
+        if_not_found: success  # if parent is not found report status as success, error, or failure
+        if_ci_failed: error    # if ci fails report status as success, error, or failure
+ignore:
+  - internal/readme/readme.go
+
diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore
new file mode 100644
index 0000000000..da9d9d00b4
--- /dev/null
+++ b/vendor/go.uber.org/zap/.gitignore
@@ -0,0 +1,32 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+vendor
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+*.pprof
+*.out
+*.log
+
+/bin
+cover.out
+cover.html
diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml
new file mode 100644
index 0000000000..fbc6df7906
--- /dev/null
+++ b/vendor/go.uber.org/zap/.golangci.yml
@@ -0,0 +1,77 @@
+output:
+  # Make output more digestible with quickfix in vim/emacs/etc.
+  sort-results: true
+  print-issued-lines: false
+
+linters:
+  # We'll track the golangci-lint default linters manually
+  # instead of letting them change without our control.
+  disable-all: true
+  enable:
+    # golangci-lint defaults:
+    - errcheck
+    - gosimple
+    - govet
+    - ineffassign
+    - staticcheck
+    - unused
+
+    # Our own extras:
+    - gofmt
+    - nolintlint # lints nolint directives
+    - revive
+
+linters-settings:
+  govet:
+    # These govet checks are disabled by default, but they're useful.
+    enable:
+      - nilness
+      - reflectvaluecompare
+      - sortslice
+      - unusedwrite
+
+  errcheck:
+    exclude-functions:
+      # These methods cannot fail.
+      # They operate on an in-memory buffer.
+      - (*go.uber.org/zap/buffer.Buffer).Write
+      - (*go.uber.org/zap/buffer.Buffer).WriteByte
+      - (*go.uber.org/zap/buffer.Buffer).WriteString
+
+      - (*go.uber.org/zap/zapio.Writer).Close
+      - (*go.uber.org/zap/zapio.Writer).Sync
+      - (*go.uber.org/zap/zapio.Writer).Write
+      # Write to zapio.Writer cannot fail,
+      # so io.WriteString on it cannot fail.
+      - io.WriteString(*go.uber.org/zap/zapio.Writer)
+
+      # Writing a plain string to a fmt.State cannot fail.
+      - io.WriteString(fmt.State)
+
+issues:
+  # Print all issues reported by all linters.
+  max-issues-per-linter: 0
+  max-same-issues: 0
+
+  # Don't ignore some of the issues that golangci-lint considers okay.
+  # This includes documenting all exported entities.
+  exclude-use-default: false
+
+  exclude-rules:
+    # Don't warn on unused parameters.
+    # Parameter names are useful; replacing them with '_' is undesirable.
+    - linters: [revive]
+      text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _'
+
+    # staticcheck already has smarter checks for empty blocks.
+    # revive's empty-block linter has false positives.
+    # For example, as of writing this, the following is not allowed.
+    #   for foo() { }
+    - linters: [revive]
+      text: 'empty-block: this block is empty, you can remove it'
+
+    # Ignore logger.Sync() errcheck failures in example_test.go
+    # since those are intended to be uncomplicated examples.
+    - linters: [errcheck]
+      path: example_test.go
+      text: 'Error return value of `logger.Sync` is not checked'
diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl
new file mode 100644
index 0000000000..92aa65d660
--- /dev/null
+++ b/vendor/go.uber.org/zap/.readme.tmpl
@@ -0,0 +1,109 @@
+# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+Blazing fast, structured, leveled logging in Go.
+
+## Installation
+
+`go get -u go.uber.org/zap`
+
+Note that zap only supports the two most recent minor versions of Go.
+
+## Quick Start
+
+In contexts where performance is nice, but not critical, use the
+`SugaredLogger`. It's 4-10x faster than other structured logging
+packages and includes both structured and `printf`-style APIs.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync() // flushes buffer, if any
+sugar := logger.Sugar()
+sugar.Infow("failed to fetch URL",
+  // Structured context as loosely typed key-value pairs.
+  "url", url,
+  "attempt", 3,
+  "backoff", time.Second,
+)
+sugar.Infof("Failed to fetch URL: %s", url)
+```
+
+When performance and type safety are critical, use the `Logger`. It's even
+faster than the `SugaredLogger` and allocates far less, but it only supports
+structured logging.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync()
+logger.Info("failed to fetch URL",
+  // Structured context as strongly typed Field values.
+  zap.String("url", url),
+  zap.Int("attempt", 3),
+  zap.Duration("backoff", time.Second),
+)
+```
+
+See the [documentation][doc] and [FAQ](FAQ.md) for more details.
+
+## Performance
+
+For applications that log in the hot path, reflection-based serialization and
+string formatting are prohibitively expensive — they're CPU-intensive
+and make many small allocations. Put differently, using `encoding/json` and
+`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
+
+Zap takes a different approach. It includes a reflection-free, zero-allocation
+JSON encoder, and the base `Logger` strives to avoid serialization overhead
+and allocations wherever possible.
By building the high-level `SugaredLogger`
+on that foundation, zap lets users *choose* when they need to count every
+allocation and when they'd prefer a more familiar, loosely typed API.
+
+As measured by its own [benchmarking suite][], not only is zap more performant
+than comparable structured logging packages — it's also faster than the
+standard library. Like all benchmarks, take these with a grain of salt.<sup>[1](#footnote-versions)</sup>
+
+Log a message and 10 fields:
+
+{{.BenchmarkAddingFields}}
+
+Log a message with a logger that already has 10 fields of context:
+
+{{.BenchmarkAccumulatedContext}}
+
+Log a static string, without any context or `printf`-style templating:
+
+{{.BenchmarkWithoutFields}}
+
+## Development Status: Stable
+
+All APIs are finalized, and no breaking changes will be made in the 1.x series
+of releases. Users of semver-aware dependency management systems should pin
+zap to `^1`.
+
+## Contributing
+
+We encourage and support an active, healthy community of contributors —
+including you! Details are in the [contribution guide](CONTRIBUTING.md) and
+the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
+issues and pull requests, but you can also report any negative conduct to
+oss-conduct@uber.com. That email list is a private, safe space; even the zap
+maintainers don't have access, so don't hesitate to hold us to a high
+standard.
+
+<hr>
+
+Released under the [MIT License](LICENSE.txt).
+
+<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
+benchmarking against slightly older versions of other packages. Versions are
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
+
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
+[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/zap
+[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
+
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
new file mode 100644
index 0000000000..11b4659761
--- /dev/null
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -0,0 +1,671 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## 1.26.0 (14 Sep 2023)
+Enhancements:
+* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured
+context.
+* [#1350][]: String encoding is much (~50%) faster now.
+
+Thanks to @jquirke, @cdvr1993 for their contributions to this release.
+
+[#1319]: https://github.com/uber-go/zap/pull/1319
+[#1350]: https://github.com/uber-go/zap/pull/1350
+
+## 1.25.0 (1 Aug 2023)
+
+This release contains several improvements including performance, API additions,
+and two new experimental packages whose APIs are unstable and may change in the
+future.
+
+Enhancements:
+* [#1246][]: Add `zap/exp/zapslog` package for integration with slog.
+* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set.
+* [#1281][]: Add `zap/exp/expfield` package which contains helper methods
+`Str` and `Strs` for constructing String-like zap.Fields.
+* [#1310][]: Reduce stack size on `Any`.
+
+Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions
+to this release.
+
+[#1246]: https://github.com/uber-go/zap/pull/1246
+[#1273]: https://github.com/uber-go/zap/pull/1273
+[#1281]: https://github.com/uber-go/zap/pull/1281
+[#1310]: https://github.com/uber-go/zap/pull/1310
+
+## 1.24.0 (30 Nov 2022)
+
+Enhancements:
+* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
+  current minimum enabled log level.
+* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically.
+
+Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
+contributions to this release.
+
+[#1148]: https://github.com/uber-go/zap/pull/1148
+[#1185]: https://github.com/uber-go/zap/pull/1185
+
+## 1.23.0 (24 Aug 2022)
+
+Enhancements:
+* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
+  `LevelEnabler` or `Core`.
+* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
+  that implement `String() string`.
+
+[#1147]: https://github.com/uber-go/zap/pull/1147
+[#1155]: https://github.com/uber-go/zap/pull/1155
+
+## 1.22.0 (8 Aug 2022)
+
+Enhancements:
+* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log
+  arrays of objects.
+  With these two constructors, you don't need to implement
+  `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement
+  `zapcore.ObjectMarshaler`.
+* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing
+  `SugaredLogger` with the provided options applied.
+* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level.
+  These functions provide a string joining behavior similar to `fmt.Println`.
+* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the
+  logger for `Fatal`-level log entries. This defaults to exiting the program.
+* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or
+  `NewDevelopment` to panic if the system was unable to build the logger.
+* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for
+  a statement dynamically.
+
+Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun
+for their contributions to this release.
+
+[#1071]: https://github.com/uber-go/zap/pull/1071
+[#1079]: https://github.com/uber-go/zap/pull/1079
+[#1080]: https://github.com/uber-go/zap/pull/1080
+[#1088]: https://github.com/uber-go/zap/pull/1088
+[#1108]: https://github.com/uber-go/zap/pull/1108
+[#1118]: https://github.com/uber-go/zap/pull/1118
+
+## 1.21.0 (7 Feb 2022)
+
+Enhancements:
+* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
+* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a
+  string.
+
+Bugfixes:
+* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset.
+
+Other changes:
+* [#1052][]: Improve encoding performance when the `AddCaller` and
+  `AddStacktrace` options are used together.
+
+[#1047]: https://github.com/uber-go/zap/pull/1047
+[#1048]: https://github.com/uber-go/zap/pull/1048
+[#1052]: https://github.com/uber-go/zap/pull/1052
+[#1058]: https://github.com/uber-go/zap/pull/1058
+
+Thanks to @aerosol and @Techassi for their contributions to this release.
+
+## 1.20.0 (4 Jan 2022)
+
+Enhancements:
+* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline
+  characters between log statements.
+* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON
+  encoding of reflected log fields.
+
+Bugfixes:
+* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON.
+* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject`
+  methods when the methods return.
+* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero.
+
+Other changes:
+* [#1028][]: Drop support for Go < 1.15.
+
+[#554]: https://github.com/uber-go/zap/pull/554
+[#989]: https://github.com/uber-go/zap/pull/989
+[#1011]: https://github.com/uber-go/zap/pull/1011
+[#1017]: https://github.com/uber-go/zap/pull/1017
+[#1028]: https://github.com/uber-go/zap/pull/1028
+[#1033]: https://github.com/uber-go/zap/pull/1033
+[#1039]: https://github.com/uber-go/zap/pull/1039
+
+Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release.
+
+## 1.19.1 (8 Sep 2021)
+
+Bugfixes:
+* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
+* [#1003][]: JSON: Fix inaccurate precision when encoding float32.
+
+[#1001]: https://github.com/uber-go/zap/pull/1001
+[#1003]: https://github.com/uber-go/zap/pull/1003
+
+## 1.19.0 (9 Aug 2021)
+
+Enhancements:
+* [#975][]: Avoid panicking in Sampler core if the level is out of bounds.
+* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields
+  better.
+
+[#975]: https://github.com/uber-go/zap/pull/975
+[#984]: https://github.com/uber-go/zap/pull/984
+
+Thanks to @lancoLiu and @thockin for their contributions to this release.
+
+## 1.18.1 (28 Jun 2021)
+
+Bugfixes:
+* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`.
+
+[#974]: https://github.com/uber-go/zap/pull/974
+
+## 1.18.0 (28 Jun 2021)
+
+Enhancements:
+* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers
+  messages in-memory and flushes them periodically.
+* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`.
+* [#897][]: Add `zap.WithClock` option to control the source of time via the
+  new `zapcore.Clock` interface.
+* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w`
+  methods don't match expectations.
+* [#943][]: Add support for filtering by level or arbitrary matcher function to
+  `zaptest/observer`.
+* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's
+  `buffer.Buffer`.
+
+Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee
+for their contributions to this release.
+
+[#691]: https://github.com/uber-go/zap/pull/691
+[#897]: https://github.com/uber-go/zap/pull/897
+[#943]: https://github.com/uber-go/zap/pull/943
+[#949]: https://github.com/uber-go/zap/pull/949
+[#961]: https://github.com/uber-go/zap/pull/961
+[#971]: https://github.com/uber-go/zap/pull/971
+
+## 1.17.0 (25 May 2021)
+
+Bugfixes:
+* [#867][]: Encode `<nil>` for nil `error` instead of a panic.
+* [#931][], [#936][]: Update minimum version constraints to address
+  vulnerabilities in dependencies.
+
+Enhancements:
+* [#865][]: Improve alignment of fields of the Logger struct, reducing its
+  size from 96 to 80 bytes.
+* [#881][]: Support `grpclog.LoggerV2` in zapgrpc.
+* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler
+  with the `application/x-www-form-urlencoded` content type.
+* [#912][]: Support multi-field encoding with `zap.Inline`.
+* [#913][]: Speed up SugaredLogger for calls with a single string.
+* [#928][]: Add support for filtering by field name to `zaptest/observer`.
+
+Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
+
+[#865]: https://github.com/uber-go/zap/pull/865
+[#867]: https://github.com/uber-go/zap/pull/867
+[#881]: https://github.com/uber-go/zap/pull/881
+[#903]: https://github.com/uber-go/zap/pull/903
+[#912]: https://github.com/uber-go/zap/pull/912
+[#913]: https://github.com/uber-go/zap/pull/913
+[#928]: https://github.com/uber-go/zap/pull/928
+[#931]: https://github.com/uber-go/zap/pull/931
+[#936]: https://github.com/uber-go/zap/pull/936
+
+## 1.16.0 (1 Sep 2020)
+
+Bugfixes:
+* [#828][]: Fix missing newline in IncreaseLevel error messages.
+* [#835][]: Fix panic in JSON encoder when encoding times or durations
+  without specifying a time or duration encoder.
+* [#843][]: Honor CallerSkip when taking stack traces.
+* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead.
+* [#854][]: Encode `<nil>` for nil `Stringer` instead of a panic error log.
+
+Enhancements:
+* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders
+  for custom layouts.
+* [#697][]: Added support for a configurable delimiter in the console encoder.
+* [#852][]: Optimize console encoder by pooling the underlying JSON encoder.
+* [#844][]: Add ability to include the calling function as part of logs.
+* [#843][]: Add `StackSkip` for including truncated stacks as a field.
+* [#861][]: Add options to customize Fatal behaviour for better testability.
+
+Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
+
+[#629]: https://github.com/uber-go/zap/pull/629
+[#697]: https://github.com/uber-go/zap/pull/697
+[#828]: https://github.com/uber-go/zap/pull/828
+[#835]: https://github.com/uber-go/zap/pull/835
+[#843]: https://github.com/uber-go/zap/pull/843
+[#844]: https://github.com/uber-go/zap/pull/844
+[#852]: https://github.com/uber-go/zap/pull/852
+[#854]: https://github.com/uber-go/zap/pull/854
+[#861]: https://github.com/uber-go/zap/pull/861
+[#862]: https://github.com/uber-go/zap/pull/862
+
+## 1.15.0 (23 Apr 2020)
+
+Bugfixes:
+* [#804][]: Fix handling of `Time` values out of `UnixNano` range.
+* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`.
+
+Enhancements:
+* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This
+  allows disabling annotation of log entries with caller information if
+  previously enabled with `AddCaller`.
+* [#813][]: Deprecate `NewSampler` constructor in favor of
+  `NewSamplerWithOptions` which supports a `SamplerHook` option. This option
+  adds support for monitoring sampling decisions through a hook.
+
+Thanks to @danielbprice for their contributions to this release.
+
+[#804]: https://github.com/uber-go/zap/pull/804
+[#812]: https://github.com/uber-go/zap/pull/812
+[#806]: https://github.com/uber-go/zap/pull/806
+[#813]: https://github.com/uber-go/zap/pull/813
+
+## 1.14.1 (14 Mar 2020)
+
+Bugfixes:
+* [#791][]: Fix panic on attempting to build a logger with an invalid Config.
+* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's
+  development-time dependencies.
+* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to
+  be generated for arrays of `time.Time` objects when using string-based time
+  formats.
+
+Thanks to @YashishDua for their contributions to this release.
+
+[#791]: https://github.com/uber-go/zap/pull/791
+[#795]: https://github.com/uber-go/zap/pull/795
+[#799]: https://github.com/uber-go/zap/pull/799
+
+## 1.14.0 (20 Feb 2020)
+
+Enhancements:
+* [#771][]: Optimize calls for disabled log levels.
+* [#773][]: Add millisecond duration encoder.
+* [#775][]: Add option to increase the level of a logger.
+* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible.
+
+Thanks to @caibirdme for their contributions to this release.
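+
+To illustrate the `IncreaseLevel` option added in [#775][] above, a minimal
+sketch (the names and levels are illustrative):
+
+```go
+base, _ := zap.NewProduction() // enabled at InfoLevel and above
+defer base.Sync()
+
+// Derive a quieter copy that emits only ErrorLevel and above.
+quiet := base.WithOptions(zap.IncreaseLevel(zap.ErrorLevel))
+quiet.Info("suppressed by the raised level")
+quiet.Error("still logged")
+```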
+ +[#771]: https://github.com/uber-go/zap/pull/771 +[#773]: https://github.com/uber-go/zap/pull/773 +[#775]: https://github.com/uber-go/zap/pull/775 +[#786]: https://github.com/uber-go/zap/pull/786 + +## 1.13.0 (13 Nov 2019) + +Enhancements: +* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors + to log pointers to primitives with support for `nil` values. + +Thanks to @jbizzle for their contributions to this release. + +[#758]: https://github.com/uber-go/zap/pull/758 + +## 1.12.0 (29 Oct 2019) + +Enhancements: +* [#751][]: Migrate to Go modules. + +[#751]: https://github.com/uber-go/zap/pull/751 + +## 1.11.0 (21 Oct 2019) + +Enhancements: +* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`. +* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders. + +Thanks to @juicemia, @uhthomas for their contributions to this release. + +[#725]: https://github.com/uber-go/zap/pull/725 +[#736]: https://github.com/uber-go/zap/pull/736 + +## 1.10.0 (29 Apr 2019) + +Bugfixes: +* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a + string. +* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. + +Enhancements: +* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test + loggers. +* [#675][]: Don't panic when encoding a String field. +* [#704][]: Disable HTML escaping for JSON objects encoded using the + reflect-based encoder. + +Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions +to this release. + +[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 + +## v1.9.1 (06 Aug 2018) + +Bugfixes: + +* [#614][]: MapObjectEncoder should not ignore empty slices. + +[#614]: https://github.com/uber-go/zap/pull/614 + +## v1.9.0 (19 Jul 2018) + +Enhancements: +* [#602][]: Reduce number of allocations when logging with reflection. +* [#572][], [#606][]: Expose a registry for third-party logging sinks. + +Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and +@dimroc for their contributions to this release. + +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 + +## v1.8.0 (13 Apr 2018) + +Enhancements: +* [#508][]: Make log level configurable when redirecting the standard + library's logger. +* [#518][]: Add a logger that writes to a `*testing.TB`. +* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc. + +Bugfixes: +* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`. + +Thanks to @DiSiqueira and @djui for their contributions to this release. + +[#508]: https://github.com/uber-go/zap/pull/508 +[#518]: https://github.com/uber-go/zap/pull/518 +[#577]: https://github.com/uber-go/zap/pull/577 +[#574]: https://github.com/uber-go/zap/pull/574 + +## v1.7.1 (25 Sep 2017) + +Bugfixes: +* [#504][]: Store strings when using AddByteString with the map encoder. + +[#504]: https://github.com/uber-go/zap/pull/504 + +## v1.7.0 (21 Sep 2017) + +Enhancements: + +* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user + to specify the level of the logged messages. 
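+
+A minimal usage sketch (the logger and the chosen level are illustrative):
+
+```go
+logger := zap.NewExample()
+std, err := zap.NewStdLogAt(logger, zap.WarnLevel)
+if err != nil {
+	panic(err)
+}
+std.Print("forwarded to zap at WarnLevel")
+```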
+ +[#487]: https://github.com/uber-go/zap/pull/487 + +## v1.6.0 (30 Aug 2017) + +Enhancements: + +* [#491][]: Omit zap stack frames from stacktraces. +* [#490][]: Add a `ContextMap` method to observer logs for simpler + field validation in tests. + +[#490]: https://github.com/uber-go/zap/pull/490 +[#491]: https://github.com/uber-go/zap/pull/491 + +## v1.5.0 (22 Jul 2017) + +Enhancements: + +* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`. +* [#465][]: Support user-supplied encoders for logger names. + +Bugfixes: + +* [#477][]: Fix a bug that incorrectly truncated deep stacktraces. + +Thanks to @richard-tunein and @pavius for their contributions to this release. + +[#477]: https://github.com/uber-go/zap/pull/477 +[#465]: https://github.com/uber-go/zap/pull/465 +[#460]: https://github.com/uber-go/zap/pull/460 +[#470]: https://github.com/uber-go/zap/pull/470 + +## v1.4.1 (08 Jun 2017) + +This release fixes two bugs. + +Bugfixes: + +* [#435][]: Support a variety of case conventions when unmarshaling levels. +* [#444][]: Fix a panic in the observer. + +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 + +## v1.4.0 (12 May 2017) + +This release adds a few small features and is fully backward-compatible. + +Enhancements: + +* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to + override the Unix-style default. +* [#425][]: Preserve time zones when logging times. +* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a + variety of operations a bit simpler. + +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 + +## v1.3.0 (25 Apr 2017) + +This release adds an enhancement to zap's testing helpers as well as the +ability to marshal an AtomicLevel. It is fully backward-compatible. + +Enhancements: + +* [#415][]: Add a substring-filtering helper to zap's observer. This is + particularly useful when testing the `SugaredLogger`. +* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. + +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 + +## v1.2.0 (13 Apr 2017) + +This release adds a gRPC compatibility wrapper. It is fully backward-compatible. + +Enhancements: + +* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements + `grpclog.Logger`. + +[#402]: https://github.com/uber-go/zap/pull/402 + +## v1.1.0 (31 Mar 2017) + +This release fixes two bugs and adds some enhancements to zap's testing helpers. +It is fully backward-compatible. + +Bugfixes: + +* [#385][]: Fix caller path trimming on Windows. +* [#396][]: Fix a panic when attempting to use non-existent directories with + zap's configuration struct. + +Enhancements: + +* [#386][]: Add filtering helpers to zaptest's observing logger. + +Thanks to @moitias for contributing to this release. + +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 + +## v1.0.0 (14 Mar 2017) + +This is zap's first stable release. All exported APIs are now final, and no +further breaking changes will be made in the 1.x release series. Anyone using a +semver-aware dependency manager should now pin to `^1`. 
+ +Breaking changes: + +* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without + casting from `[]byte` to `string`. +* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, + `zap.Logger`, and `zap.SugaredLogger`. +* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to + clash with other testing helpers. + +Bugfixes: + +* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier + for tab-separated console output. +* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to + work with concurrency-safe `WriteSyncer` implementations. +* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux + systems. +* [#373][]: Report the correct caller from zap's standard library + interoperability wrappers. + +Enhancements: + +* [#348][]: Add a registry allowing third-party encodings to work with zap's + built-in `Config`. +* [#327][]: Make the representation of logger callers configurable (like times, + levels, and durations). +* [#376][]: Allow third-party encoders to use their own buffer pools, which + removes the last performance advantage that zap's encoders have over plugins. +* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple + `WriteSyncer`s and lock the result. +* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in + Go 1.9). +* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it + easier for particularly punctilious users to unit test their application's + logging. + +Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their +contributions to this release. + +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 + +## v1.0.0-rc.3 (7 Mar 2017) + +This is the third release candidate for zap's stable release. There are no +breaking changes. + +Bugfixes: + +* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs + rather than `[]uint8`. + +Enhancements: + +* [#307][]: Users can opt into colored output for log levels. +* [#353][]: In addition to hijacking the output of the standard library's + package-global logging functions, users can now construct a zap-backed + `log.Logger` instance. +* [#311][]: Frames from common runtime functions and some of zap's internal + machinery are now omitted from stacktraces. + +Thanks to @ansel1 and @suyash for their contributions to this release. + +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 + +## v1.0.0-rc.2 (21 Feb 2017) + +This is the second release candidate for zap's stable release. It includes two +breaking changes. 
+ +Breaking changes: + +* [#316][]: Zap's global loggers are now fully concurrency-safe + (previously, users had to ensure that `ReplaceGlobals` was called before the + loggers were in use). However, they must now be accessed via the `L()` and + `S()` functions. Users can update their projects with + + ``` + gofmt -r "zap.L -> zap.L()" -w . + gofmt -r "zap.S -> zap.S()" -w . + ``` +* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid + JSON and YAML struct tags on all config structs. This release fixes the tags + and adds static analysis to prevent similar bugs in the future. + +Bugfixes: + +* [#321][]: Redirecting the standard library's `log` output now + correctly reports the logger's caller. + +Enhancements: + +* [#325][] and [#333][]: Zap now transparently supports non-standard, rich + errors like those produced by `github.com/pkg/errors`. +* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is + now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> + zap.NewNop()' -w .`. +* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a + more informative error. + +Thanks to @skipor and @chapsuk for their contributions to this release. + +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 + +## v1.0.0-rc.1 (14 Feb 2017) + +This is the first release candidate for zap's stable release. There are multiple +breaking changes and improvements from the pre-release version. Most notably: + +* **Zap's import path is now "go.uber.org/zap"** — all users will + need to update their code. +* User-facing types and functions remain in the `zap` package. Code relevant + largely to extension authors is now in the `zapcore` package. +* The `zapcore.Core` type makes it easy for third-party packages to use zap's + internals but provide a different user-facing API. +* `Logger` is now a concrete type instead of an interface. +* A less verbose (though slower) logging API is included by default. +* Package-global loggers `L` and `S` are included. +* A human-friendly console encoder is included. +* A declarative config struct allows common logger configurations to be managed + as configuration instead of code. +* Sampling is more accurate, and doesn't depend on the standard library's shared + timer heap. + +## v0.1.0-beta.1 (6 Feb 2017) + +This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and +upgrade at their leisure. Since this is the first tagged release, there are no +backward compatibility concerns and all functionality is new. + +Early zap adopters should pin to the 0.1.x minor version until they're ready to +upgrade to the upcoming stable release. 
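+
+For reference, the global loggers described in the v1.0.0-rc.2 notes above
+are accessed like this in the stable API (a minimal sketch; the example
+logger is illustrative):
+
+```go
+logger := zap.NewExample()
+undo := zap.ReplaceGlobals(logger)
+defer undo()
+
+zap.L().Info("via the global Logger")
+zap.S().Infow("via the global SugaredLogger", "key", "value")
+```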
diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..e327d9aa5c --- /dev/null +++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. 
+ +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md new file mode 100644 index 0000000000..ea02f3cae2 --- /dev/null +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -0,0 +1,70 @@ +# Contributing + +We'd love your help making zap the very best structured logging library in Go! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +```bash +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/zap.git +cd zap +git remote add upstream https://github.com/uber-go/zap.git +git fetch upstream +``` + +Make sure that the tests and the linters pass: + +```bash +make test +make lint +``` + +## Making Changes + +Start by creating a new branch for your changes: + +```bash +cd $GOPATH/src/go.uber.org/zap +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +```bash +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We _try_ to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +- Add tests for new functionality. +- Write a [good commit message][commit-message]. +- Maintain backward compatibility. + +[fork]: https://github.com/uber-go/zap/fork +[open-issue]: https://github.com/uber-go/zap/issues/new +[cla]: https://cla-assistant.io/uber-go/zap +[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md new file mode 100644 index 0000000000..b183b20bc1 --- /dev/null +++ b/vendor/go.uber.org/zap/FAQ.md @@ -0,0 +1,164 @@ +# Frequently Asked Questions + +## Design + +### Why spend so much effort on logger performance? + +Of course, most applications won't notice the impact of a slow logger: they +already take tens or hundreds of milliseconds for each operation, so an extra +millisecond doesn't matter. + +On the other hand, why *not* make structured logging fast? The `SugaredLogger` +isn't any harder to use than other logging packages, and the `Logger` makes +structured logging possible in performance-sensitive contexts. Across a fleet +of Go microservices, making each application even slightly more efficient adds +up quickly. + +### Why aren't `Logger` and `SugaredLogger` interfaces? 
+
+Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and
+`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points
+out][go-proverbs], "The bigger the interface, the weaker the abstraction."
+Interfaces are also rigid — *any* change requires releasing a new major
+version, since it breaks all third-party implementations.
+
+Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much
+abstraction, and it lets us add methods without introducing breaking changes.
+Your applications should define and depend upon an interface that includes
+just the methods you use.
+
+### Why are some of my logs missing?
+
+Logs are dropped intentionally by zap when sampling is enabled. The production
+configuration (as returned by `NewProductionConfig()`) enables sampling, which
+will cause repeated logs within a second to be sampled. See more details on why
+sampling is enabled in [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs).
+
+### Why sample application logs?
+
+Applications often experience runs of errors, either because of a bug or
+because of a misbehaving user. Logging errors is usually a good idea, but it
+can easily make this bad situation worse: not only is your application coping
+with a flood of errors, it's also spending extra CPU cycles and I/O logging
+those errors. Since writes are typically serialized, logging limits throughput
+when you need it most.
+
+Sampling fixes this problem by dropping repetitive log entries. Under normal
+conditions, your application writes out every entry. When similar entries are
+logged hundreds or thousands of times each second, though, zap begins dropping
+duplicates to preserve throughput.
+
+### Why do the structured logging APIs take a message in addition to fields?
+
+Subjectively, we find it helpful to accompany structured context with a brief
+description. This isn't critical during development, but it makes debugging
+and operating unfamiliar systems much easier.
+
+More concretely, zap's sampling algorithm uses the message to identify
+duplicate entries. In our experience, this is a practical middle ground
+between random sampling (which often drops the exact entry that you need while
+debugging) and hashing the complete entry (which is prohibitively expensive).
+
+### Why include package-global loggers?
+
+Since so many other logging packages include a global logger, many
+applications aren't designed to accept loggers as explicit parameters.
+Changing function signatures is often a breaking change, so zap includes
+global loggers to simplify migration.
+
+Avoid them where possible.
+
+### Why include dedicated Panic and Fatal log levels?
+
+In general, application code should handle errors gracefully instead of using
+`panic` or `os.Exit`. However, every rule has exceptions, and it's common to
+crash when an error is truly unrecoverable. To avoid losing any information
+— especially the reason for the crash — the logger must flush any
+buffered entries before the process exits.
+
+Zap makes this easy by offering `Panic` and `Fatal` logging methods that
+automatically flush before exiting. Of course, this doesn't guarantee that
+logs will never be lost, but it eliminates a common error.
+
+See the discussion in uber-go/zap#207 for more details.
+
+### What's `DPanic`?
+
+`DPanic` stands for "panic in development." In development, it logs at
+`PanicLevel`; otherwise, it logs at `ErrorLevel`.
`DPanic` makes it easier to +catch errors that are theoretically possible, but shouldn't actually happen, +*without* crashing in production. + +If you've ever written code like this, you need `DPanic`: + +```go +if err != nil { + panic(fmt.Sprintf("shouldn't ever get here: %v", err)) +} +``` + +## Installation + +### What does the error `expects import "go.uber.org/zap"` mean? + +Either zap was installed incorrectly or you're referencing the wrong package +name in your code. + +Zap's source code happens to be hosted on GitHub, but the [import +path][import-path] is `go.uber.org/zap`. This gives us, the project +maintainers, the freedom to move the source code if necessary. However, it +means that you need to take a little care when installing and using the +package. + +If you follow two simple rules, everything should work: install zap with `go +get -u go.uber.org/zap`, and always import it in your code with `import +"go.uber.org/zap"`. Your code shouldn't contain *any* references to +`github.com/uber-go/zap`. + +## Usage + +### Does zap support log rotation? + +Zap doesn't natively support rotating log files, since we prefer to leave this +to an external program like `logrotate`. + +However, it's easy to integrate a log rotation package like +[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`. + +```go +// lumberjack.Logger is already safe for concurrent use, so we don't need to +// lock it. +w := zapcore.AddSync(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, // days +}) +core := zapcore.NewCore( + zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + w, + zap.InfoLevel, +) +logger := zap.New(core) +``` + +## Extensions + +We'd love to support every logging need within zap itself, but we're only +familiar with a handful of log ingestion systems, flag-parsing packages, and +the like. Rather than merging code that we can't effectively debug and +support, we'd rather grow an ecosystem of zap extensions. + +We're aware of the following extensions, but haven't used them ourselves: + +| Package | Integration | +| --- | --- | +| `github.com/tchap/zapext` | Sentry, syslog | +| `github.com/fgrosse/zaptest` | Ginkgo | +| `github.com/blendle/zapdriver` | Stackdriver | +| `github.com/moul/zapgorm` | Gorm | +| `github.com/moul/zapfilter` | Advanced filtering rules | + +[go-proverbs]: https://go-proverbs.github.io/ +[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths +[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2 diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt new file mode 100644 index 0000000000..6652bed45f --- /dev/null +++ b/vendor/go.uber.org/zap/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile new file mode 100644 index 0000000000..eb1cee53bd --- /dev/null +++ b/vendor/go.uber.org/zap/Makefile @@ -0,0 +1,76 @@ +# Directory containing the Makefile. +PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + +export GOBIN ?= $(PROJECT_ROOT)/bin +export PATH := $(GOBIN):$(PATH) + +GOVULNCHECK = $(GOBIN)/govulncheck +BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem + +# Directories containing independent Go modules. +MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test + +# Directories that we want to track coverage for. +COVER_DIRS = . ./exp + +.PHONY: all +all: lint test + +.PHONY: lint +lint: golangci-lint tidy-lint license-lint + +.PHONY: golangci-lint +golangci-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] golangci-lint: $(mod)" && \ + golangci-lint run --path-prefix $(mod)) &&) true + +.PHONY: tidy +tidy: + @$(foreach dir,$(MODULE_DIRS), \ + (cd $(dir) && go mod tidy) &&) true + +.PHONY: tidy-lint +tidy-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] tidy: $(mod)" && \ + go mod tidy && \ + git diff --exit-code -- go.mod go.sum) &&) true + + +.PHONY: license-lint +license-lint: + ./checklicense.sh + +$(GOVULNCHECK): + cd tools && go install golang.org/x/vuln/cmd/govulncheck + +.PHONY: test +test: + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true + +.PHONY: cover +cover: + @$(foreach dir,$(COVER_DIRS), ( \ + cd $(dir) && \ + go test -race -coverprofile=cover.out -coverpkg=./... ./... \ + && go tool cover -html=cover.out -o cover.html) &&) true + +.PHONY: bench +BENCH ?= . +bench: + @$(foreach dir,$(MODULE_DIRS), ( \ + cd $(dir) && \ + go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \ + ) &&) true + +.PHONY: updatereadme +updatereadme: + rm -f README.md + cat .readme.tmpl | go run internal/readme/readme.go > README.md + +.PHONY: vulncheck +vulncheck: $(GOVULNCHECK) + $(GOVULNCHECK) ./... diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md new file mode 100644 index 0000000000..9de08927be --- /dev/null +++ b/vendor/go.uber.org/zap/README.md @@ -0,0 +1,137 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. 
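+	// (The url variable is assumed to be defined by the surrounding code.)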
+ "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 1744 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op +| zerolog | 918 ns/op | -47% | 1 allocs/op +| go-kit | 5590 ns/op | +221% | 57 allocs/op +| slog | 5640 ns/op | +223% | 40 allocs/op +| apex/log | 21184 ns/op | +1115% | 63 allocs/op +| logrus | 24338 ns/op | +1296% | 79 allocs/op +| log15 | 26054 ns/op | +1394% | 74 allocs/op + +Log a message with a logger that already has 10 fields of context: + +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 193 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op +| zerolog | 81 ns/op | -58% | 0 allocs/op +| slog | 322 ns/op | +67% | 0 allocs/op +| go-kit | 5377 ns/op | +2686% | 56 allocs/op +| apex/log | 19518 ns/op | +10013% | 53 allocs/op +| log15 | 19812 ns/op | +10165% | 70 allocs/op +| logrus | 21997 ns/op | +11297% | 68 allocs/op + +Log a static string, without any context or `printf`-style templating: + +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 165 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op +| zerolog | 95 ns/op | -42% | 0 allocs/op +| slog | 296 ns/op | +79% | 0 allocs/op +| go-kit | 415 ns/op | +152% | 9 allocs/op +| standard library | 422 ns/op | +156% | 2 allocs/op +| apex/log | 1601 ns/op | +870% | 5 allocs/op +| logrus | 3017 ns/op | +1728% | 23 allocs/op +| log15 | 3469 ns/op | +2002% | 20 allocs/op + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. 
+ +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) + +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go new file mode 100644 index 0000000000..abfccb566d --- /dev/null +++ b/vendor/go.uber.org/zap/array.go @@ -0,0 +1,447 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "time" + + "go.uber.org/zap/zapcore" +) + +// Array constructs a field with the given key and ArrayMarshaler. It provides +// a flexible, but still type-safe and efficient, way to add array-like types +// to the logging context. The struct's MarshalLogArray method is called lazily. +func Array(key string, val zapcore.ArrayMarshaler) Field { + return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} +} + +// Bools constructs a field that carries a slice of bools. +func Bools(key string, bs []bool) Field { + return Array(key, bools(bs)) +} + +// ByteStrings constructs a field that carries a slice of []byte, each of which +// must be UTF-8 encoded text. +func ByteStrings(key string, bss [][]byte) Field { + return Array(key, byteStringsArray(bss)) +} + +// Complex128s constructs a field that carries a slice of complex numbers. +func Complex128s(key string, nums []complex128) Field { + return Array(key, complex128s(nums)) +} + +// Complex64s constructs a field that carries a slice of complex numbers. +func Complex64s(key string, nums []complex64) Field { + return Array(key, complex64s(nums)) +} + +// Durations constructs a field that carries a slice of time.Durations. +func Durations(key string, ds []time.Duration) Field { + return Array(key, durations(ds)) +} + +// Float64s constructs a field that carries a slice of floats. 
+func Float64s(key string, nums []float64) Field { + return Array(key, float64s(nums)) +} + +// Float32s constructs a field that carries a slice of floats. +func Float32s(key string, nums []float32) Field { + return Array(key, float32s(nums)) +} + +// Ints constructs a field that carries a slice of integers. +func Ints(key string, nums []int) Field { + return Array(key, ints(nums)) +} + +// Int64s constructs a field that carries a slice of integers. +func Int64s(key string, nums []int64) Field { + return Array(key, int64s(nums)) +} + +// Int32s constructs a field that carries a slice of integers. +func Int32s(key string, nums []int32) Field { + return Array(key, int32s(nums)) +} + +// Int16s constructs a field that carries a slice of integers. +func Int16s(key string, nums []int16) Field { + return Array(key, int16s(nums)) +} + +// Int8s constructs a field that carries a slice of integers. +func Int8s(key string, nums []int8) Field { + return Array(key, int8s(nums)) +} + +// Objects constructs a field with the given key, holding a list of the +// provided objects that can be marshaled by Zap. +// +// Note that these objects must implement zapcore.ObjectMarshaler directly. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the Request type, not its pointer (*Request). +// If it's on the pointer, use ObjectValues. +// +// Given an object that implements MarshalLogObject on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Author struct{ ... } +// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var authors []Author = ... +// logger.Info("loading article", zap.Objects("authors", authors)) +// +// Similarly, given a type that implements MarshalLogObject on its pointer +// receiver, you can log a slice of pointers to that object with Objects like +// so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +// +// If instead, you have a slice of values of such an object, use the +// ObjectValues constructor. +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field { + return Array(key, objects[T](values)) +} + +type objects[T zapcore.ObjectMarshaler] []T + +func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + if err := arr.AppendObject(o); err != nil { + return err + } + } + return nil +} + +// ObjectMarshalerPtr is a constraint that specifies that the given type +// implements zapcore.ObjectMarshaler on a pointer receiver. +type ObjectMarshalerPtr[T any] interface { + *T + zapcore.ObjectMarshaler +} + +// ObjectValues constructs a field with the given key, holding a list of the +// provided objects, where pointers to these objects can be marshaled by Zap. +// +// Note that pointers to these objects must implement zapcore.ObjectMarshaler. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the *Request type, not the value (Request). +// If it's on the value, use Objects. +// +// Given an object that implements MarshalLogObject on the pointer receiver, +// you can log a slice of those objects with ObjectValues like so: +// +// type Request struct{ ... 
}
+//	func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+//	var requests []Request = ...
+//	logger.Info("sending requests", zap.ObjectValues("requests", requests))
+//
+// If instead, you have a slice of pointers of such an object, use the Objects
+// field constructor.
+//
+//	var requests []*Request = ...
+//	logger.Info("sending requests", zap.Objects("requests", requests))
+func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
+	return Array(key, objectValues[T, P](values))
+}
+
+type objectValues[T any, P ObjectMarshalerPtr[T]] []T
+
+func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range os {
+		// It is necessary for us to explicitly reference the "P" type.
+		// We cannot simply pass "&os[i]" to AppendObject because its type
+		// is "*T", which the type system does not consider as
+		// implementing ObjectMarshaler.
+		// Only the type "P" satisfies ObjectMarshaler, which we have
+		// to convert "*T" to explicitly.
+		var p P = &os[i]
+		if err := arr.AppendObject(p); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Strings constructs a field that carries a slice of strings.
+func Strings(key string, ss []string) Field {
+	return Array(key, stringArray(ss))
+}
+
+// Stringers constructs a field with the given key, holding a list of the
+// output provided by the value's String method.
+//
+// Given an object that implements String on the value receiver, you
+// can log a slice of those objects with Stringers like so:
+//
+//	type Request struct{ ... }
+//	func (a Request) String() string
+//
+//	var requests []Request = ...
+//	logger.Info("sending requests", zap.Stringers("requests", requests))
+//
+// Note that these objects must implement fmt.Stringer directly.
+// That is, if you're trying to marshal a []Request, the String method
+// must be declared on the Request type, not its pointer (*Request).
+func Stringers[T fmt.Stringer](key string, values []T) Field {
+	return Array(key, stringers[T](values))
+}
+
+type stringers[T fmt.Stringer] []T
+
+func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for _, o := range os {
+		arr.AppendString(o.String())
+	}
+	return nil
+}
+
+// Times constructs a field that carries a slice of time.Times.
+func Times(key string, ts []time.Time) Field {
+	return Array(key, times(ts))
+}
+
+// Uints constructs a field that carries a slice of unsigned integers.
+func Uints(key string, nums []uint) Field {
+	return Array(key, uints(nums))
+}
+
+// Uint64s constructs a field that carries a slice of unsigned integers.
+func Uint64s(key string, nums []uint64) Field {
+	return Array(key, uint64s(nums))
+}
+
+// Uint32s constructs a field that carries a slice of unsigned integers.
+func Uint32s(key string, nums []uint32) Field {
+	return Array(key, uint32s(nums))
+}
+
+// Uint16s constructs a field that carries a slice of unsigned integers.
+func Uint16s(key string, nums []uint16) Field {
+	return Array(key, uint16s(nums))
+}
+
+// Uint8s constructs a field that carries a slice of unsigned integers.
+func Uint8s(key string, nums []uint8) Field {
+	return Array(key, uint8s(nums))
+}
+
+// Uintptrs constructs a field that carries a slice of pointer addresses.
+func Uintptrs(key string, us []uintptr) Field {
+	return Array(key, uintptrs(us))
+}
+
+// Errors constructs a field that carries a slice of errors.
+func Errors(key string, errs []error) Field { + return Array(key, errArray(errs)) +} + +type bools []bool + +func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bs { + arr.AppendBool(bs[i]) + } + return nil +} + +type byteStringsArray [][]byte + +func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bss { + arr.AppendByteString(bss[i]) + } + return nil +} + +type complex128s []complex128 + +func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex128(nums[i]) + } + return nil +} + +type complex64s []complex64 + +func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex64(nums[i]) + } + return nil +} + +type durations []time.Duration + +func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ds { + arr.AppendDuration(ds[i]) + } + return nil +} + +type float64s []float64 + +func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat64(nums[i]) + } + return nil +} + +type float32s []float32 + +func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat32(nums[i]) + } + return nil +} + +type ints []int + +func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt(nums[i]) + } + return nil +} + +type int64s []int64 + +func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt64(nums[i]) + } + return nil +} + +type int32s []int32 + +func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt32(nums[i]) + } + return nil +} + +type int16s []int16 + +func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt16(nums[i]) + } + return nil +} + +type int8s []int8 + +func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt8(nums[i]) + } + return nil +} + +type stringArray []string + +func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ss { + arr.AppendString(ss[i]) + } + return nil +} + +type times []time.Time + +func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ts { + arr.AppendTime(ts[i]) + } + return nil +} + +type uints []uint + +func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint(nums[i]) + } + return nil +} + +type uint64s []uint64 + +func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint64(nums[i]) + } + return nil +} + +type uint32s []uint32 + +func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint32(nums[i]) + } + return nil +} + +type uint16s []uint16 + +func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint16(nums[i]) + } + return nil +} + +type uint8s []uint8 + +func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint8(nums[i]) + } + return nil +} + +type uintptrs []uintptr + +func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUintptr(nums[i]) + } + return nil +} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go 
 b/vendor/go.uber.org/zap/buffer/buffer.go
new file mode 100644
index 0000000000..27fb5cd5da
--- /dev/null
+++ b/vendor/go.uber.org/zap/buffer/buffer.go
@@ -0,0 +1,146 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package buffer provides a thin wrapper around a byte slice. Unlike the
+// standard library's bytes.Buffer, it supports a portion of the strconv
+// package's zero-allocation formatters.
+package buffer // import "go.uber.org/zap/buffer"
+
+import (
+	"strconv"
+	"time"
+)
+
+const _size = 1024 // by default, create 1 KiB buffers
+
+// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so
+// the only way to construct one is via a Pool.
+type Buffer struct {
+	bs   []byte
+	pool Pool
+}
+
+// AppendByte writes a single byte to the Buffer.
+func (b *Buffer) AppendByte(v byte) {
+	b.bs = append(b.bs, v)
+}
+
+// AppendBytes writes the given slice of bytes to the Buffer.
+func (b *Buffer) AppendBytes(v []byte) {
+	b.bs = append(b.bs, v...)
+}
+
+// AppendString writes a string to the Buffer.
+func (b *Buffer) AppendString(s string) {
+	b.bs = append(b.bs, s...)
+}
+
+// AppendInt appends an integer to the underlying buffer (assuming base 10).
+func (b *Buffer) AppendInt(i int64) {
+	b.bs = strconv.AppendInt(b.bs, i, 10)
+}
+
+// AppendTime appends the time formatted using the specified layout.
+func (b *Buffer) AppendTime(t time.Time, layout string) {
+	b.bs = t.AppendFormat(b.bs, layout)
+}
+
+// AppendUint appends an unsigned integer to the underlying buffer (assuming
+// base 10).
+func (b *Buffer) AppendUint(i uint64) {
+	b.bs = strconv.AppendUint(b.bs, i, 10)
+}
+
+// AppendBool appends a bool to the underlying buffer.
+func (b *Buffer) AppendBool(v bool) {
+	b.bs = strconv.AppendBool(b.bs, v)
+}
+
+// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN
+// or +/- Inf.
+func (b *Buffer) AppendFloat(f float64, bitSize int) {
+	b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize)
+}
+
+// Len returns the length of the underlying byte slice.
+func (b *Buffer) Len() int {
+	return len(b.bs)
+}
+
+// Cap returns the capacity of the underlying byte slice.
+func (b *Buffer) Cap() int {
+	return cap(b.bs)
+}
+
+// Bytes returns a mutable reference to the underlying byte slice.
+func (b *Buffer) Bytes() []byte {
+	return b.bs
+}
+
+// String returns a string copy of the underlying byte slice.
+func (b *Buffer) String() string { + return string(b.bs) +} + +// Reset resets the underlying byte slice. Subsequent writes re-use the slice's +// backing array. +func (b *Buffer) Reset() { + b.bs = b.bs[:0] +} + +// Write implements io.Writer. +func (b *Buffer) Write(bs []byte) (int, error) { + b.bs = append(b.bs, bs...) + return len(bs), nil +} + +// WriteByte writes a single byte to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteByte(v byte) error { + b.AppendByte(v) + return nil +} + +// WriteString writes a string to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteString(s string) (int, error) { + b.AppendString(s) + return len(s), nil +} + +// TrimNewline trims any final "\n" byte from the end of the buffer. +func (b *Buffer) TrimNewline() { + if i := len(b.bs) - 1; i >= 0 { + if b.bs[i] == '\n' { + b.bs = b.bs[:i] + } + } +} + +// Free returns the Buffer to its Pool. +// +// Callers must not retain references to the Buffer after calling Free. +func (b *Buffer) Free() { + b.pool.put(b) +} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go new file mode 100644 index 0000000000..846323360e --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -0,0 +1,53 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package buffer + +import ( + "go.uber.org/zap/internal/pool" +) + +// A Pool is a type-safe wrapper around a sync.Pool. +type Pool struct { + p *pool.Pool[*Buffer] +} + +// NewPool constructs a new Pool. +func NewPool() Pool { + return Pool{ + p: pool.New(func() *Buffer { + return &Buffer{ + bs: make([]byte, 0, _size), + } + }), + } +} + +// Get retrieves a Buffer from the pool, creating one if necessary. +func (p Pool) Get() *Buffer { + buf := p.p.Get() + buf.Reset() + buf.pool = p + return buf +} + +func (p Pool) put(buf *Buffer) { + p.p.Put(buf) +} diff --git a/vendor/go.uber.org/zap/checklicense.sh b/vendor/go.uber.org/zap/checklicense.sh new file mode 100644 index 0000000000..345ac8b89a --- /dev/null +++ b/vendor/go.uber.org/zap/checklicense.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +ERROR_COUNT=0 +while read -r file +do + case "$(head -1 "${file}")" in + *"Copyright (c) "*" Uber Technologies, Inc.") + # everything's cool + ;; + *) + echo "$file is missing license header." 
+ (( ERROR_COUNT++ )) + ;; + esac +done < <(git ls-files "*\.go") + +exit $ERROR_COUNT diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go new file mode 100644 index 0000000000..e76e4e64fb --- /dev/null +++ b/vendor/go.uber.org/zap/config.go @@ -0,0 +1,330 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "sort" + "time" + + "go.uber.org/zap/zapcore" +) + +// SamplingConfig sets a sampling strategy for the logger. Sampling caps the +// global CPU and I/O load that logging puts on your process while attempting +// to preserve a representative subset of your logs. +// +// If specified, the Sampler will invoke the Hook after each decision. +// +// Values configured here are per-second. See zapcore.NewSamplerWithOptions for +// details. +type SamplingConfig struct { + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` + Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"` +} + +// Config offers a declarative way to construct a logger. It doesn't do +// anything that can't be done with New, Options, and the various +// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to +// toggle common options. +// +// Note that Config intentionally supports only the most common options. More +// unusual logging setups (logging to network connections or message queues, +// splitting output between multiple files, etc.) are possible, but require +// direct use of the zapcore package. For sample code, see the package-level +// BasicConfiguration and AdvancedConfiguration examples. +// +// For an example showing runtime log level changes, see the documentation for +// AtomicLevel. +type Config struct { + // Level is the minimum enabled logging level. Note that this is a dynamic + // level, so calling Config.Level.SetLevel will atomically change the log + // level of all loggers descended from this config. + Level AtomicLevel `json:"level" yaml:"level"` + // Development puts the logger in development mode, which changes the + // behavior of DPanicLevel and takes stacktraces more liberally. + Development bool `json:"development" yaml:"development"` + // DisableCaller stops annotating logs with the calling function's file + // name and line number. By default, all logs are annotated. 
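+	// When left false (the default), Build attaches the AddCaller option;
+	// see buildOptions below (editor's note).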
+ DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` + // DisableStacktrace completely disables automatic stacktrace capturing. By + // default, stacktraces are captured for WarnLevel and above logs in + // development and ErrorLevel and above in production. + DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` + // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. + Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` + // Encoding sets the logger's encoding. Valid values are "json" and + // "console", as well as any third-party encodings registered via + // RegisterEncoder. + Encoding string `json:"encoding" yaml:"encoding"` + // EncoderConfig sets options for the chosen encoder. See + // zapcore.EncoderConfig for details. + EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` + // OutputPaths is a list of URLs or file paths to write logging output to. + // See Open for details. + OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` + // ErrorOutputPaths is a list of URLs to write internal logger errors to. + // The default is standard error. + // + // Note that this setting only affects internal errors; for sample code that + // sends error-level logs to a different location from info- and debug-level + // logs, see the package-level AdvancedConfiguration example. + ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` + // InitialFields is a collection of fields to add to the root logger. + InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` +} + +// NewProductionEncoderConfig returns an opinionated EncoderConfig for +// production environments. +// +// Messages encoded with this configuration will be JSON-formatted +// and will have the following keys by default: +// +// - "level": The logging level (e.g. "info", "error"). +// - "ts": The current time in number of seconds since the Unix epoch. +// - "msg": The message passed to the log statement. +// - "caller": If available, a short path to the file and line number +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// - "stacktrace": If available, a stack trace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted as floating-point number of seconds since the Unix +// epoch. +// - Duration is formatted as floating-point number of seconds. +// +// You may change these by setting the appropriate fields in the returned +// object. +// For example, use the following to change the time encoding format: +// +// cfg := zap.NewProductionEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder +func NewProductionEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + FunctionKey: zapcore.OmitKey, + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewProductionConfig builds a reasonable default production logging +// configuration. +// Logging is enabled at InfoLevel and above, and uses a JSON encoder. 
+// Logs are written to standard error. +// Stacktraces are included on logs of ErrorLevel and above. +// DPanicLevel logs will not panic, but will write a stacktrace. +// +// Sampling is enabled at 100:100 by default, +// meaning that after the first 100 log entries +// with the same level and message in the same second, +// it will log every 100th entry +// with the same level and message in the same second. +// You may disable this behavior by setting Sampling to nil. +// +// See [NewProductionEncoderConfig] for information +// on the default encoder configuration. +func NewProductionConfig() Config { + return Config{ + Level: NewAtomicLevelAt(InfoLevel), + Development: false, + Sampling: &SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + Encoding: "json", + EncoderConfig: NewProductionEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for +// development environments. +// +// Messages encoded with this configuration will use Zap's console encoder +// intended to print human-readable output. +// It will print log messages with the following information: +// +// - The log level (e.g. "INFO", "ERROR"). +// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - The message passed to the log statement. +// - If available, a short path to the file and line number +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// - If available, a stacktrace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - Duration is formatted as a string (e.g. "1.234s"). +// +// You may change these by setting the appropriate fields in the returned +// object. +// For example, use the following to change the time encoding format: +// +// cfg := zap.NewDevelopmentEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder +func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + // Keys can be anything except the empty string. + TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + FunctionKey: zapcore.OmitKey, + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewDevelopmentConfig builds a reasonable default development logging +// configuration. +// Logging is enabled at DebugLevel and above, and uses a console encoder. +// Logs are written to standard error. +// Stacktraces are included on logs of WarnLevel and above. +// DPanicLevel logs will panic. +// +// See [NewDevelopmentEncoderConfig] for information +// on the default encoder configuration. +func NewDevelopmentConfig() Config { + return Config{ + Level: NewAtomicLevelAt(DebugLevel), + Development: true, + Encoding: "console", + EncoderConfig: NewDevelopmentEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// Build constructs a logger from the Config and Options. 
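+//
+// A minimal usage sketch (editor's addition, not part of the upstream godoc):
+//
+//	cfg := zap.NewProductionConfig()
+//	logger, err := cfg.Build()
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer logger.Sync()
+//	logger.Info("constructed from Config")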
+func (cfg Config) Build(opts ...Option) (*Logger, error) { + enc, err := cfg.buildEncoder() + if err != nil { + return nil, err + } + + sink, errSink, err := cfg.openSinks() + if err != nil { + return nil, err + } + + if cfg.Level == (AtomicLevel{}) { + return nil, errors.New("missing Level") + } + + log := New( + zapcore.NewCore(enc, sink, cfg.Level), + cfg.buildOptions(errSink)..., + ) + if len(opts) > 0 { + log = log.WithOptions(opts...) + } + return log, nil +} + +func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { + opts := []Option{ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, AddCaller()) + } + + stackLevel := ErrorLevel + if cfg.Development { + stackLevel = WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, AddStacktrace(stackLevel)) + } + + if scfg := cfg.Sampling; scfg != nil { + opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { + var samplerOpts []zapcore.SamplerOption + if scfg.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook)) + } + return zapcore.NewSamplerWithOptions( + core, + time.Second, + cfg.Sampling.Initial, + cfg.Sampling.Thereafter, + samplerOpts..., + ) + })) + } + + if len(cfg.InitialFields) > 0 { + fs := make([]Field, 0, len(cfg.InitialFields)) + keys := make([]string, 0, len(cfg.InitialFields)) + for k := range cfg.InitialFields { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fs = append(fs, Any(k, cfg.InitialFields[k])) + } + opts = append(opts, Fields(fs...)) + } + + return opts +} + +func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { + sink, closeOut, err := Open(cfg.OutputPaths...) + if err != nil { + return nil, nil, err + } + errSink, _, err := Open(cfg.ErrorOutputPaths...) + if err != nil { + closeOut() + return nil, nil, err + } + return sink, errSink, nil +} + +func (cfg Config) buildEncoder() (zapcore.Encoder, error) { + return newEncoder(cfg.Encoding, cfg.EncoderConfig) +} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go new file mode 100644 index 0000000000..3c50d7b4d3 --- /dev/null +++ b/vendor/go.uber.org/zap/doc.go @@ -0,0 +1,117 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zap provides fast, structured, leveled logging. 
+// +// For applications that log in the hot path, reflection-based serialization +// and string formatting are prohibitively expensive - they're CPU-intensive +// and make many small allocations. Put differently, using json.Marshal and +// fmt.Fprintf to log tons of interface{} makes your application slow. +// +// Zap takes a different approach. It includes a reflection-free, +// zero-allocation JSON encoder, and the base Logger strives to avoid +// serialization overhead and allocations wherever possible. By building the +// high-level SugaredLogger on that foundation, zap lets users choose when +// they need to count every allocation and when they'd prefer a more familiar, +// loosely typed API. +// +// # Choosing a Logger +// +// In contexts where performance is nice, but not critical, use the +// SugaredLogger. It's 4-10x faster than other structured logging packages and +// supports both structured and printf-style logging. Like log15 and go-kit, +// the SugaredLogger's structured logging APIs are loosely typed and accept a +// variadic number of key-value pairs. (For more advanced use cases, they also +// accept strongly typed fields - see the SugaredLogger.With documentation for +// details.) +// +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Infof("failed to fetch URL: %s", "http://example.com") +// +// By default, loggers are unbuffered. However, since zap's low-level APIs +// allow buffering, calling Sync before letting your process exit is a good +// habit. +// +// In the rare contexts where every microsecond and every allocation matter, +// use the Logger. It's even faster than the SugaredLogger and allocates far +// less, but it only supports strongly-typed, structured logging. +// +// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) +// +// Choosing between the Logger and SugaredLogger doesn't need to be an +// application-wide decision: converting between the two is simple and +// inexpensive. +// +// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// # Configuring Zap +// +// The simplest way to build a Logger is to use zap's opinionated presets: +// NewExample, NewProduction, and NewDevelopment. These presets build a logger +// with a single function call: +// +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() +// +// Presets are fine for small projects, but larger projects and organizations +// naturally require a bit more customization. For most users, zap's Config +// struct strikes the right balance between flexibility and convenience. See +// the package-level BasicConfiguration example for sample code. +// +// More unusual configurations (splitting output between files, sending logs +// to a message queue, etc.) are possible, but require direct use of +// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration +// example for sample code. +// +// # Extending Zap +// +// The zap package itself is a relatively thin wrapper around the interfaces +// in go.uber.org/zap/zapcore. 
Extending zap to support a new encoding (e.g., +// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an +// exception aggregation service, like Sentry or Rollbar) typically requires +// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core +// interfaces. See the zapcore documentation for details. +// +// Similarly, package authors can use the high-performance Encoder and Core +// implementations in the zapcore package to build their own loggers. +// +// # Frequently Asked Questions +// +// An FAQ covering everything from installation errors to design decisions is +// available at https://github.com/uber-go/zap/blob/master/FAQ.md. +package zap // import "go.uber.org/zap" diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go new file mode 100644 index 0000000000..caa04ceefd --- /dev/null +++ b/vendor/go.uber.org/zap/encoder.go @@ -0,0 +1,79 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "sync" + + "go.uber.org/zap/zapcore" +) + +var ( + errNoEncoderNameSpecified = errors.New("no encoder name specified") + + _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ + "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewConsoleEncoder(encoderConfig), nil + }, + "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewJSONEncoder(encoderConfig), nil + }, + } + _encoderMutex sync.RWMutex +) + +// RegisterEncoder registers an encoder constructor, which the Config struct +// can then reference. By default, the "json" and "console" encoders are +// registered. +// +// Attempting to register an encoder whose name is already taken returns an +// error. 
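+//
+// A sketch of registering a custom encoder (editor's addition; the name
+// "myjson" is illustrative):
+//
+//	err := zap.RegisterEncoder("myjson", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
+//		return zapcore.NewJSONEncoder(cfg), nil // wrap or customize as needed
+//	})
+//	if err != nil {
+//		// the name was already taken
+//	}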
+func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { + _encoderMutex.Lock() + defer _encoderMutex.Unlock() + if name == "" { + return errNoEncoderNameSpecified + } + if _, ok := _encoderNameToConstructor[name]; ok { + return fmt.Errorf("encoder already registered for name %q", name) + } + _encoderNameToConstructor[name] = constructor + return nil +} + +func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { + return nil, errors.New("missing EncodeTime in EncoderConfig") + } + + _encoderMutex.RLock() + defer _encoderMutex.RUnlock() + if name == "" { + return nil, errNoEncoderNameSpecified + } + constructor, ok := _encoderNameToConstructor[name] + if !ok { + return nil, fmt.Errorf("no encoder registered for name %q", name) + } + return constructor(encoderConfig) +} diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go new file mode 100644 index 0000000000..45f7b838dc --- /dev/null +++ b/vendor/go.uber.org/zap/error.go @@ -0,0 +1,82 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "go.uber.org/zap/internal/pool" + "go.uber.org/zap/zapcore" +) + +var _errArrayElemPool = pool.New(func() *errArrayElem { + return &errArrayElem{} +}) + +// Error is shorthand for the common idiom NamedError("error", err). +func Error(err error) Field { + return NamedError("error", err) +} + +// NamedError constructs a field that lazily stores err.Error() under the +// provided key. Errors which also implement fmt.Formatter (like those produced +// by github.com/pkg/errors) will also have their verbose representation stored +// under key+"Verbose". If passed a nil error, the field is a no-op. +// +// For the common case in which the key is simply "error", the Error function +// is shorter and less repetitive. +func NamedError(key string, err error) Field { + if err == nil { + return Skip() + } + return Field{Key: key, Type: zapcore.ErrorType, Interface: err} +} + +type errArray []error + +func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + // To represent each error as an object with an "error" attribute and + // potentially an "errorVerbose" attribute, we need to wrap it in a + // type that implements LogObjectMarshaler. 
To prevent this from + // allocating, pool the wrapper type. + elem := _errArrayElemPool.Get() + elem.error = errs[i] + err := arr.AppendObject(elem) + elem.error = nil + _errArrayElemPool.Put(elem) + if err != nil { + return err + } + } + return nil +} + +type errArrayElem struct { + error +} + +func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { + // Re-use the error field's logic, which supports non-standard error types. + Error(e.error).AddTo(enc) + return nil +} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go new file mode 100644 index 0000000000..c8dd3358a9 --- /dev/null +++ b/vendor/go.uber.org/zap/field.go @@ -0,0 +1,613 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "math" + "time" + + "go.uber.org/zap/internal/stacktrace" + "go.uber.org/zap/zapcore" +) + +// Field is an alias for Field. Aliasing this type dramatically +// improves the navigability of this package's API documentation. +type Field = zapcore.Field + +var ( + _minTimeInt64 = time.Unix(0, math.MinInt64) + _maxTimeInt64 = time.Unix(0, math.MaxInt64) +) + +// Skip constructs a no-op field, which is often useful when handling invalid +// inputs in other Field constructors. +func Skip() Field { + return Field{Type: zapcore.SkipType} +} + +// nilField returns a field which will marshal explicitly as nil. See motivation +// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking +// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the +// implementation here should be changed to reflect that. +func nilField(key string) Field { return Reflect(key, nil) } + +// Binary constructs a field that carries an opaque binary blob. +// +// Binary data is serialized in an encoding-appropriate format. For example, +// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, +// use ByteString. +func Binary(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.BinaryType, Interface: val} +} + +// Bool constructs a field that carries a bool. +func Bool(key string, val bool) Field { + var ival int64 + if val { + ival = 1 + } + return Field{Key: key, Type: zapcore.BoolType, Integer: ival} +} + +// Boolp constructs a field that carries a *bool. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
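+// With the JSON encoder, for example, Boolp("enabled", nil) is encoded as an
+// explicit null rather than being dropped (editor's note).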
+func Boolp(key string, val *bool) Field { + if val == nil { + return nilField(key) + } + return Bool(key, *val) +} + +// ByteString constructs a field that carries UTF-8 encoded text as a []byte. +// To log opaque binary blobs (which aren't necessarily valid UTF-8), use +// Binary. +func ByteString(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} +} + +// Complex128 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex128 to +// interface{}). +func Complex128(key string, val complex128) Field { + return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} +} + +// Complex128p constructs a field that carries a *complex128. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex128p(key string, val *complex128) Field { + if val == nil { + return nilField(key) + } + return Complex128(key, *val) +} + +// Complex64 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex64 to +// interface{}). +func Complex64(key string, val complex64) Field { + return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} +} + +// Complex64p constructs a field that carries a *complex64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex64p(key string, val *complex64) Field { + if val == nil { + return nilField(key) + } + return Complex64(key, *val) +} + +// Float64 constructs a field that carries a float64. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float64(key string, val float64) Field { + return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} +} + +// Float64p constructs a field that carries a *float64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float64p(key string, val *float64) Field { + if val == nil { + return nilField(key) + } + return Float64(key, *val) +} + +// Float32 constructs a field that carries a float32. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float32(key string, val float32) Field { + return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} +} + +// Float32p constructs a field that carries a *float32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float32p(key string, val *float32) Field { + if val == nil { + return nilField(key) + } + return Float32(key, *val) +} + +// Int constructs a field with the given key and value. +func Int(key string, val int) Field { + return Int64(key, int64(val)) +} + +// Intp constructs a field that carries a *int. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Intp(key string, val *int) Field { + if val == nil { + return nilField(key) + } + return Int(key, *val) +} + +// Int64 constructs a field with the given key and value. +func Int64(key string, val int64) Field { + return Field{Key: key, Type: zapcore.Int64Type, Integer: val} +} + +// Int64p constructs a field that carries a *int64. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Int64p(key string, val *int64) Field { + if val == nil { + return nilField(key) + } + return Int64(key, *val) +} + +// Int32 constructs a field with the given key and value. +func Int32(key string, val int32) Field { + return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} +} + +// Int32p constructs a field that carries a *int32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int32p(key string, val *int32) Field { + if val == nil { + return nilField(key) + } + return Int32(key, *val) +} + +// Int16 constructs a field with the given key and value. +func Int16(key string, val int16) Field { + return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} +} + +// Int16p constructs a field that carries a *int16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int16p(key string, val *int16) Field { + if val == nil { + return nilField(key) + } + return Int16(key, *val) +} + +// Int8 constructs a field with the given key and value. +func Int8(key string, val int8) Field { + return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} +} + +// Int8p constructs a field that carries a *int8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int8p(key string, val *int8) Field { + if val == nil { + return nilField(key) + } + return Int8(key, *val) +} + +// String constructs a field with the given key and value. +func String(key string, val string) Field { + return Field{Key: key, Type: zapcore.StringType, String: val} +} + +// Stringp constructs a field that carries a *string. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Stringp(key string, val *string) Field { + if val == nil { + return nilField(key) + } + return String(key, *val) +} + +// Uint constructs a field with the given key and value. +func Uint(key string, val uint) Field { + return Uint64(key, uint64(val)) +} + +// Uintp constructs a field that carries a *uint. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintp(key string, val *uint) Field { + if val == nil { + return nilField(key) + } + return Uint(key, *val) +} + +// Uint64 constructs a field with the given key and value. +func Uint64(key string, val uint64) Field { + return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} +} + +// Uint64p constructs a field that carries a *uint64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint64p(key string, val *uint64) Field { + if val == nil { + return nilField(key) + } + return Uint64(key, *val) +} + +// Uint32 constructs a field with the given key and value. +func Uint32(key string, val uint32) Field { + return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} +} + +// Uint32p constructs a field that carries a *uint32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint32p(key string, val *uint32) Field { + if val == nil { + return nilField(key) + } + return Uint32(key, *val) +} + +// Uint16 constructs a field with the given key and value. +func Uint16(key string, val uint16) Field { + return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} +} + +// Uint16p constructs a field that carries a *uint16. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Uint16p(key string, val *uint16) Field { + if val == nil { + return nilField(key) + } + return Uint16(key, *val) +} + +// Uint8 constructs a field with the given key and value. +func Uint8(key string, val uint8) Field { + return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} +} + +// Uint8p constructs a field that carries a *uint8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint8p(key string, val *uint8) Field { + if val == nil { + return nilField(key) + } + return Uint8(key, *val) +} + +// Uintptr constructs a field with the given key and value. +func Uintptr(key string, val uintptr) Field { + return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} +} + +// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintptrp(key string, val *uintptr) Field { + if val == nil { + return nilField(key) + } + return Uintptr(key, *val) +} + +// Reflect constructs a field with the given key and an arbitrary object. It uses +// an encoding-appropriate, reflection-based function to lazily serialize nearly +// any object into the logging context, but it's relatively slow and +// allocation-heavy. Outside tests, Any is always a better choice. +// +// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect +// includes the error message in the final log output. +func Reflect(key string, val interface{}) Field { + return Field{Key: key, Type: zapcore.ReflectType, Interface: val} +} + +// Namespace creates a named, isolated scope within the logger's context. All +// subsequent fields will be added to the new namespace. +// +// This helps prevent key collisions when injecting loggers into sub-components +// or third-party libraries. +func Namespace(key string) Field { + return Field{Key: key, Type: zapcore.NamespaceType} +} + +// Stringer constructs a field with the given key and the output of the value's +// String method. The Stringer's String method is called lazily. +func Stringer(key string, val fmt.Stringer) Field { + return Field{Key: key, Type: zapcore.StringerType, Interface: val} +} + +// Time constructs a Field with the given key and value. The encoder +// controls how the time is serialized. +func Time(key string, val time.Time) Field { + if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) { + return Field{Key: key, Type: zapcore.TimeFullType, Interface: val} + } + return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} +} + +// Timep constructs a field that carries a *time.Time. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Timep(key string, val *time.Time) Field { + if val == nil { + return nilField(key) + } + return Time(key, *val) +} + +// Stack constructs a field that stores a stacktrace of the current goroutine +// under provided key. Keep in mind that taking a stacktrace is eager and +// expensive (relatively speaking); this function both makes an allocation and +// takes about two microseconds. +func Stack(key string) Field { + return StackSkip(key, 1) // skip Stack +} + +// StackSkip constructs a field similarly to Stack, but also skips the given +// number of frames from the top of the stacktrace. +func StackSkip(key string, skip int) Field { + // Returning the stacktrace as a string costs an allocation, but saves us + // from expanding the zapcore.Field union struct to include a byte slice. 
Since + // taking a stacktrace is already so expensive (~10us), the extra allocation + // is okay. + return String(key, stacktrace.Take(skip+1)) // skip StackSkip +} + +// Duration constructs a field with the given key and value. The encoder +// controls how the duration is serialized. +func Duration(key string, val time.Duration) Field { + return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} +} + +// Durationp constructs a field that carries a *time.Duration. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Durationp(key string, val *time.Duration) Field { + if val == nil { + return nilField(key) + } + return Duration(key, *val) +} + +// Object constructs a field with the given key and ObjectMarshaler. It +// provides a flexible, but still type-safe and efficient, way to add map- or +// struct-like user-defined types to the logging context. The struct's +// MarshalLogObject method is called lazily. +func Object(key string, val zapcore.ObjectMarshaler) Field { + return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} +} + +// Inline constructs a Field that is similar to Object, but it +// will add the elements of the provided ObjectMarshaler to the +// current namespace. +func Inline(val zapcore.ObjectMarshaler) Field { + return zapcore.Field{ + Type: zapcore.InlineMarshalerType, + Interface: val, + } +} + +// Dict constructs a field containing the provided key-value pairs. +// It acts similar to [Object], but with the fields specified as arguments. +func Dict(key string, val ...Field) Field { + return dictField(key, val) +} + +// We need a function with the signature (string, T) for zap.Any. +func dictField(key string, val []Field) Field { + return Object(key, dictObject(val)) +} + +type dictObject []Field + +func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { + for _, f := range d { + f.AddTo(enc) + } + return nil +} + +// We discovered an issue where zap.Any can cause a performance degradation +// when used in new goroutines. +// +// This happens because the compiler assigns 4.8kb (one zap.Field per arm of +// switch statement) of stack space for zap.Any when it takes the form: +// +// switch v := v.(type) { +// case string: +// return String(key, v) +// case int: +// return Int(key, v) +// // ... +// default: +// return Reflect(key, v) +// } +// +// To avoid this, we use the type switch to assign a value to a single local variable +// and then call a function on it. +// The local variable is just a function reference so it doesn't allocate +// when converted to an interface{}. +// +// A fair bit of experimentation went into this. +// See also: +// +// - https://github.com/uber-go/zap/pull/1301 +// - https://github.com/uber-go/zap/pull/1303 +// - https://github.com/uber-go/zap/pull/1304 +// - https://github.com/uber-go/zap/pull/1305 +// - https://github.com/uber-go/zap/pull/1308 +type anyFieldC[T any] func(string, T) Field + +func (f anyFieldC[T]) Any(key string, val any) Field { + v, _ := val.(T) + // val is guaranteed to be a T, except when it's nil. + return f(key, v) +} + +// Any takes a key and an arbitrary value and chooses the best way to represent +// them as a field, falling back to a reflection-based approach only if +// necessary. +// +// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between +// them. 
To minimize surprises, []byte values are treated as binary blobs, byte +// values are treated as uint8, and runes are always treated as integers. +func Any(key string, value interface{}) Field { + var c interface{ Any(string, any) Field } + + switch value.(type) { + case zapcore.ObjectMarshaler: + c = anyFieldC[zapcore.ObjectMarshaler](Object) + case zapcore.ArrayMarshaler: + c = anyFieldC[zapcore.ArrayMarshaler](Array) + case []Field: + c = anyFieldC[[]Field](dictField) + case bool: + c = anyFieldC[bool](Bool) + case *bool: + c = anyFieldC[*bool](Boolp) + case []bool: + c = anyFieldC[[]bool](Bools) + case complex128: + c = anyFieldC[complex128](Complex128) + case *complex128: + c = anyFieldC[*complex128](Complex128p) + case []complex128: + c = anyFieldC[[]complex128](Complex128s) + case complex64: + c = anyFieldC[complex64](Complex64) + case *complex64: + c = anyFieldC[*complex64](Complex64p) + case []complex64: + c = anyFieldC[[]complex64](Complex64s) + case float64: + c = anyFieldC[float64](Float64) + case *float64: + c = anyFieldC[*float64](Float64p) + case []float64: + c = anyFieldC[[]float64](Float64s) + case float32: + c = anyFieldC[float32](Float32) + case *float32: + c = anyFieldC[*float32](Float32p) + case []float32: + c = anyFieldC[[]float32](Float32s) + case int: + c = anyFieldC[int](Int) + case *int: + c = anyFieldC[*int](Intp) + case []int: + c = anyFieldC[[]int](Ints) + case int64: + c = anyFieldC[int64](Int64) + case *int64: + c = anyFieldC[*int64](Int64p) + case []int64: + c = anyFieldC[[]int64](Int64s) + case int32: + c = anyFieldC[int32](Int32) + case *int32: + c = anyFieldC[*int32](Int32p) + case []int32: + c = anyFieldC[[]int32](Int32s) + case int16: + c = anyFieldC[int16](Int16) + case *int16: + c = anyFieldC[*int16](Int16p) + case []int16: + c = anyFieldC[[]int16](Int16s) + case int8: + c = anyFieldC[int8](Int8) + case *int8: + c = anyFieldC[*int8](Int8p) + case []int8: + c = anyFieldC[[]int8](Int8s) + case string: + c = anyFieldC[string](String) + case *string: + c = anyFieldC[*string](Stringp) + case []string: + c = anyFieldC[[]string](Strings) + case uint: + c = anyFieldC[uint](Uint) + case *uint: + c = anyFieldC[*uint](Uintp) + case []uint: + c = anyFieldC[[]uint](Uints) + case uint64: + c = anyFieldC[uint64](Uint64) + case *uint64: + c = anyFieldC[*uint64](Uint64p) + case []uint64: + c = anyFieldC[[]uint64](Uint64s) + case uint32: + c = anyFieldC[uint32](Uint32) + case *uint32: + c = anyFieldC[*uint32](Uint32p) + case []uint32: + c = anyFieldC[[]uint32](Uint32s) + case uint16: + c = anyFieldC[uint16](Uint16) + case *uint16: + c = anyFieldC[*uint16](Uint16p) + case []uint16: + c = anyFieldC[[]uint16](Uint16s) + case uint8: + c = anyFieldC[uint8](Uint8) + case *uint8: + c = anyFieldC[*uint8](Uint8p) + case []byte: + c = anyFieldC[[]byte](Binary) + case uintptr: + c = anyFieldC[uintptr](Uintptr) + case *uintptr: + c = anyFieldC[*uintptr](Uintptrp) + case []uintptr: + c = anyFieldC[[]uintptr](Uintptrs) + case time.Time: + c = anyFieldC[time.Time](Time) + case *time.Time: + c = anyFieldC[*time.Time](Timep) + case []time.Time: + c = anyFieldC[[]time.Time](Times) + case time.Duration: + c = anyFieldC[time.Duration](Duration) + case *time.Duration: + c = anyFieldC[*time.Duration](Durationp) + case []time.Duration: + c = anyFieldC[[]time.Duration](Durations) + case error: + c = anyFieldC[error](NamedError) + case []error: + c = anyFieldC[[]error](Errors) + case fmt.Stringer: + c = anyFieldC[fmt.Stringer](Stringer) + default: + c = anyFieldC[any](Reflect) + } + + return 
c.Any(key, value) +} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go new file mode 100644 index 0000000000..1312875072 --- /dev/null +++ b/vendor/go.uber.org/zap/flag.go @@ -0,0 +1,39 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "flag" + + "go.uber.org/zap/zapcore" +) + +// LevelFlag uses the standard library's flag.Var to declare a global flag +// with the specified name, default, and usage guidance. The returned value is +// a pointer to the value of the flag. +// +// If you don't want to use the flag package's global state, you can use any +// non-nil *Level as a flag.Value with your own *flag.FlagSet. +func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { + lvl := defaultLevel + flag.Var(&lvl, name, usage) + return &lvl +} diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml new file mode 100644 index 0000000000..8e1d05e9ab --- /dev/null +++ b/vendor/go.uber.org/zap/glide.yaml @@ -0,0 +1,34 @@ +package: go.uber.org/zap +license: MIT +import: +- package: go.uber.org/atomic + version: ^1 +- package: go.uber.org/multierr + version: ^1 +testImport: +- package: github.com/satori/go.uuid +- package: github.com/sirupsen/logrus +- package: github.com/apex/log + subpackages: + - handlers/json +- package: github.com/go-kit/kit + subpackages: + - log +- package: github.com/stretchr/testify + subpackages: + - assert + - require +- package: gopkg.in/inconshreveable/log15.v2 +- package: github.com/mattn/goveralls +- package: github.com/pborman/uuid +- package: github.com/pkg/errors +- package: github.com/rs/zerolog +- package: golang.org/x/tools + subpackages: + - cover +- package: golang.org/x/lint + subpackages: + - golint +- package: github.com/axw/gocov + subpackages: + - gocov diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go new file mode 100644 index 0000000000..3cb46c9e0a --- /dev/null +++ b/vendor/go.uber.org/zap/global.go @@ -0,0 +1,169 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "bytes" + "fmt" + "log" + "os" + "sync" + + "go.uber.org/zap/zapcore" +) + +const ( + _stdLogDefaultDepth = 1 + _loggerWriterDepth = 2 + _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + + "https://github.com/uber-go/zap/issues/new and reference this error: %v" +) + +var ( + _globalMu sync.RWMutex + _globalL = NewNop() + _globalS = _globalL.Sugar() +) + +// L returns the global Logger, which can be reconfigured with ReplaceGlobals. +// It's safe for concurrent use. +func L() *Logger { + _globalMu.RLock() + l := _globalL + _globalMu.RUnlock() + return l +} + +// S returns the global SugaredLogger, which can be reconfigured with +// ReplaceGlobals. It's safe for concurrent use. +func S() *SugaredLogger { + _globalMu.RLock() + s := _globalS + _globalMu.RUnlock() + return s +} + +// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a +// function to restore the original values. It's safe for concurrent use. +func ReplaceGlobals(logger *Logger) func() { + _globalMu.Lock() + prev := _globalL + _globalL = logger + _globalS = logger.Sugar() + _globalMu.Unlock() + return func() { ReplaceGlobals(prev) } +} + +// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at +// InfoLevel. To redirect the standard library's package-global logging +// functions, use RedirectStdLog instead. +func NewStdLog(l *Logger) *log.Logger { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + f := logger.Info + return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) +} + +// NewStdLogAt returns *log.Logger which writes to supplied zap logger at +// required level. +func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil +} + +// RedirectStdLog redirects output from the standard library's package-global +// logger to the supplied logger at InfoLevel. Since zap already handles caller +// annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. 
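+//
+// A minimal sketch (editor's addition):
+//
+//	undo := zap.RedirectStdLog(logger)
+//	defer undo()
+//	log.Print("routed through the zap logger at InfoLevel")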
+func RedirectStdLog(l *Logger) func() { + f, err := redirectStdLogAt(l, InfoLevel) + if err != nil { + // Can't get here, since passing InfoLevel to redirectStdLogAt always + // works. + panic(fmt.Sprintf(_programmerErrorTemplate, err)) + } + return f +} + +// RedirectStdLogAt redirects output from the standard library's package-global +// logger to the supplied logger at the specified level. Since zap already +// handles caller annotations, timestamps, etc., it automatically disables the +// standard library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + return redirectStdLogAt(l, level) +} + +func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + flags := log.Flags() + prefix := log.Prefix() + log.SetFlags(0) + log.SetPrefix("") + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + log.SetOutput(&loggerWriter{logFunc}) + return func() { + log.SetFlags(flags) + log.SetPrefix(prefix) + log.SetOutput(os.Stderr) + }, nil +} + +func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { + switch lvl { + case DebugLevel: + return logger.Debug, nil + case InfoLevel: + return logger.Info, nil + case WarnLevel: + return logger.Warn, nil + case ErrorLevel: + return logger.Error, nil + case DPanicLevel: + return logger.DPanic, nil + case PanicLevel: + return logger.Panic, nil + case FatalLevel: + return logger.Fatal, nil + } + return nil, fmt.Errorf("unrecognized level: %q", lvl) +} + +type loggerWriter struct { + logFunc func(msg string, fields ...Field) +} + +func (l *loggerWriter) Write(p []byte) (int, error) { + p = bytes.TrimSpace(p) + l.logFunc(string(p)) + return len(p), nil +} diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go new file mode 100644 index 0000000000..2be8f65150 --- /dev/null +++ b/vendor/go.uber.org/zap/http_handler.go @@ -0,0 +1,140 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "go.uber.org/zap/zapcore" +) + +// ServeHTTP is a simple JSON endpoint that can report on or change the current +// logging level. 
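+// Because AtomicLevel implements http.Handler, the endpoint can be mounted
+// directly (editor's sketch; the route and lvl variable are illustrative):
+//
+//	mux := http.NewServeMux()
+//	mux.Handle("/log/level", lvl)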
+// +// # GET +// +// The GET request returns a JSON description of the current logging level like: +// +// {"level":"info"} +// +// # PUT +// +// The PUT request changes the logging level. It is perfectly safe to change the +// logging level while a program is running. Two content types are supported: +// +// Content-Type: application/x-www-form-urlencoded +// +// With this content type, the level can be provided through the request body or +// a query parameter. The log level is URL encoded like: +// +// level=debug +// +// The request body takes precedence over the query parameter, if both are +// specified. +// +// This content type is the default for a curl PUT request. Following are two +// example curl requests that both set the logging level to debug. +// +// curl -X PUT localhost:8080/log/level?level=debug +// curl -X PUT localhost:8080/log/level -d level=debug +// +// For any other content type, the payload is expected to be JSON encoded and +// look like: +// +// {"level":"info"} +// +// An example curl request could look like this: +// +// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' +func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if err := lvl.serveHTTP(w, r); err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "internal error: %v", err) + } +} + +func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error { + type errorResponse struct { + Error string `json:"error"` + } + type payload struct { + Level zapcore.Level `json:"level"` + } + + enc := json.NewEncoder(w) + + switch r.Method { + case http.MethodGet: + return enc.Encode(payload{Level: lvl.Level()}) + + case http.MethodPut: + requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return enc.Encode(errorResponse{Error: err.Error()}) + } + lvl.SetLevel(requestedLvl) + return enc.Encode(payload{Level: lvl.Level()}) + + default: + w.WriteHeader(http.StatusMethodNotAllowed) + return enc.Encode(errorResponse{ + Error: "Only GET and PUT are supported.", + }) + } +} + +// Decodes incoming PUT requests and returns the requested logging level. +func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) { + if contentType == "application/x-www-form-urlencoded" { + return decodePutURL(r) + } + return decodePutJSON(r.Body) +} + +func decodePutURL(r *http.Request) (zapcore.Level, error) { + lvl := r.FormValue("level") + if lvl == "" { + return 0, errors.New("must specify logging level") + } + var l zapcore.Level + if err := l.UnmarshalText([]byte(lvl)); err != nil { + return 0, err + } + return l, nil +} + +func decodePutJSON(body io.Reader) (zapcore.Level, error) { + var pld struct { + Level *zapcore.Level `json:"level"` + } + if err := json.NewDecoder(body).Decode(&pld); err != nil { + return 0, fmt.Errorf("malformed request body: %v", err) + } + if pld.Level == nil { + return 0, errors.New("must specify logging level") + } + return *pld.Level, nil +} diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go new file mode 100644 index 0000000000..dad583aaa5 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go @@ -0,0 +1,31 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package bufferpool houses zap's shared internal buffer pool. Third-party +// packages can recreate the same functionality with buffers.NewPool. +package bufferpool + +import "go.uber.org/zap/buffer" + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. + Get = _pool.Get +) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go new file mode 100644 index 0000000000..c4d5d02abc --- /dev/null +++ b/vendor/go.uber.org/zap/internal/color/color.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package color adds coloring functionality for TTY output. +package color + +import "fmt" + +// Foreground colors. +const ( + Black Color = iota + 30 + Red + Green + Yellow + Blue + Magenta + Cyan + White +) + +// Color represents a text color. +type Color uint8 + +// Add adds the coloring to the given string. +func (c Color) Add(s string) string { + return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) +} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go new file mode 100644 index 0000000000..f673f9947b --- /dev/null +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -0,0 +1,66 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package exit provides stubs so that unit tests can exercise code that calls +// os.Exit(1). +package exit + +import "os" + +var _exit = os.Exit + +// With terminates the process by calling os.Exit(code). If the package is +// stubbed, it instead records a call in the testing spy. +func With(code int) { + _exit(code) +} + +// A StubbedExit is a testing fake for os.Exit. +type StubbedExit struct { + Exited bool + Code int + prev func(code int) +} + +// Stub substitutes a fake for the call to os.Exit(1). +func Stub() *StubbedExit { + s := &StubbedExit{prev: _exit} + _exit = s.exit + return s +} + +// WithStub runs the supplied function with Exit stubbed. It returns the stub +// used, so that users can test whether the process would have crashed. +func WithStub(f func()) *StubbedExit { + s := Stub() + defer s.Unstub() + f() + return s +} + +// Unstub restores the previous exit function. +func (se *StubbedExit) Unstub() { + _exit = se.prev +} + +func (se *StubbedExit) exit(code int) { + se.Exited = true + se.Code = code +} diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go new file mode 100644 index 0000000000..40bfed81e6 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/level_enabler.go @@ -0,0 +1,37 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +// Package internal and its subpackages hold types and functionality +// that are not part of Zap's public API. +package internal + +import "go.uber.org/zap/zapcore" + +// LeveledEnabler is an interface satisfied by LevelEnablers that are able to +// report their own level. +// +// This interface is defined to use more conveniently in tests and non-zapcore +// packages. +// This cannot be imported from zapcore because of the cyclic dependency. +type LeveledEnabler interface { + zapcore.LevelEnabler + + Level() zapcore.Level +} diff --git a/vendor/go.uber.org/zap/internal/pool/pool.go b/vendor/go.uber.org/zap/internal/pool/pool.go new file mode 100644 index 0000000000..60e9d2c432 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/pool/pool.go @@ -0,0 +1,58 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package pool provides internal pool utilities. +package pool + +import ( + "sync" +) + +// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed +// object pooling. +// +// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will +// not be detected, so all internal pool use must take care to only store +// pointer types. +type Pool[T any] struct { + pool sync.Pool +} + +// New returns a new [Pool] for T, and will use fn to construct new Ts when +// the pool is empty. +func New[T any](fn func() T) *Pool[T] { + return &Pool[T]{ + pool: sync.Pool{ + New: func() any { + return fn() + }, + }, + } +} + +// Get gets a T from the pool, or creates a new one if the pool is empty. +func (p *Pool[T]) Get() T { + return p.pool.Get().(T) +} + +// Put returns x into the pool. +func (p *Pool[T]) Put(x T) { + p.pool.Put(x) +} diff --git a/vendor/go.uber.org/zap/internal/stacktrace/stack.go b/vendor/go.uber.org/zap/internal/stacktrace/stack.go new file mode 100644 index 0000000000..82af7551f9 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/stacktrace/stack.go @@ -0,0 +1,181 @@ +// Copyright (c) 2023 Uber Technologies, Inc. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package stacktrace provides support for gathering stack traces
+// efficiently.
+package stacktrace
+
+import (
+	"runtime"
+
+	"go.uber.org/zap/buffer"
+	"go.uber.org/zap/internal/bufferpool"
+	"go.uber.org/zap/internal/pool"
+)
+
+var _stackPool = pool.New(func() *Stack {
+	return &Stack{
+		storage: make([]uintptr, 64),
+	}
+})
+
+// Stack is a captured stack trace.
+type Stack struct {
+	pcs    []uintptr // program counters; always a subslice of storage
+	frames *runtime.Frames
+
+	// The size of pcs varies depending on requirements:
+	// it will be one if only the first frame was requested,
+	// and otherwise it will reflect the depth of the call stack.
+	//
+	// storage decouples the slice we need (pcs) from the slice we pool.
+	// We will always allocate a reasonably large storage, but we'll use
+	// only as much of it as we need.
+	storage []uintptr
+}
+
+// Depth specifies how deep of a stack trace should be captured.
+type Depth int
+
+const (
+	// First captures only the first frame.
+	First Depth = iota
+
+	// Full captures the entire call stack, allocating more
+	// storage for it if needed.
+	Full
+)
+
+// Capture captures a stack trace of the specified depth, skipping
+// the provided number of frames. skip=0 identifies the caller of
+// Capture.
+//
+// The caller must call Free on the returned stacktrace after using it.
+func Capture(skip int, depth Depth) *Stack {
+	stack := _stackPool.Get()
+
+	switch depth {
+	case First:
+		stack.pcs = stack.storage[:1]
+	case Full:
+		stack.pcs = stack.storage
+	}
+
+	// Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
+	// itself. +2 to skip Capture and runtime.Callers.
+	numFrames := runtime.Callers(
+		skip+2,
+		stack.pcs,
+	)
+
+	// runtime.Callers truncates the recorded stacktrace if there is no
+	// room in the provided slice. For the full stack trace, keep expanding
+	// storage until there are fewer frames than there is room.
+	if depth == Full {
+		pcs := stack.pcs
+		for numFrames == len(pcs) {
+			pcs = make([]uintptr, len(pcs)*2)
+			numFrames = runtime.Callers(skip+2, pcs)
+		}
+
+		// Discard old storage instead of returning it to the pool.
+		// This will adjust the pool size over time if stack traces are
+		// consistently very deep.
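+		// (Free returns this Stack, including its grown storage, to the
+		// pool, so subsequent captures can reuse the larger slice.)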
+		stack.storage = pcs
+		stack.pcs = pcs[:numFrames]
+	} else {
+		stack.pcs = stack.pcs[:numFrames]
+	}
+
+	stack.frames = runtime.CallersFrames(stack.pcs)
+	return stack
+}
+
+// Free releases resources associated with this stacktrace
+// and returns it back to the pool.
+func (st *Stack) Free() {
+	st.frames = nil
+	st.pcs = nil
+	_stackPool.Put(st)
+}
+
+// Count reports the total number of frames in this stacktrace.
+// Count DOES NOT change as Next is called.
+func (st *Stack) Count() int {
+	return len(st.pcs)
+}
+
+// Next returns the next frame in the stack trace,
+// and a boolean indicating whether there are more after it.
+func (st *Stack) Next() (_ runtime.Frame, more bool) {
+	return st.frames.Next()
+}
+
+// Take returns a string representation of the current stacktrace.
+//
+// skip is the number of frames to skip before recording the stack trace.
+// skip=0 identifies the caller of Take.
+func Take(skip int) string {
+	stack := Capture(skip+1, Full)
+	defer stack.Free()
+
+	buffer := bufferpool.Get()
+	defer buffer.Free()
+
+	stackfmt := NewFormatter(buffer)
+	stackfmt.FormatStack(stack)
+	return buffer.String()
+}
+
+// Formatter formats a stack trace into a readable string representation.
+type Formatter struct {
+	b        *buffer.Buffer
+	nonEmpty bool // whether we've written at least one frame already
+}
+
+// NewFormatter builds a new Formatter.
+func NewFormatter(b *buffer.Buffer) Formatter {
+	return Formatter{b: b}
+}
+
+// FormatStack formats all remaining frames in the provided stacktrace -- minus
+// the final runtime.main/runtime.goexit frame.
+func (sf *Formatter) FormatStack(stack *Stack) {
+	// Note: On the last iteration, frames.Next() returns false, with a valid
+	// frame, but we ignore this frame. The last frame is a runtime frame which
+	// adds noise, since it's only either runtime.main or runtime.goexit.
+	for frame, more := stack.Next(); more; frame, more = stack.Next() {
+		sf.FormatFrame(frame)
+	}
+}
+
+// FormatFrame formats the given frame.
+func (sf *Formatter) FormatFrame(frame runtime.Frame) {
+	if sf.nonEmpty {
+		sf.b.AppendByte('\n')
+	}
+	sf.nonEmpty = true
+	sf.b.AppendString(frame.Function)
+	sf.b.AppendByte('\n')
+	sf.b.AppendByte('\t')
+	sf.b.AppendString(frame.File)
+	sf.b.AppendByte(':')
+	sf.b.AppendInt(int64(frame.Line))
+}
diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go
new file mode 100644
index 0000000000..155b208bd3
--- /dev/null
+++ b/vendor/go.uber.org/zap/level.go
@@ -0,0 +1,153 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sync/atomic" + + "go.uber.org/zap/internal" + "go.uber.org/zap/zapcore" +) + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel = zapcore.DebugLevel + // InfoLevel is the default logging priority. + InfoLevel = zapcore.InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel = zapcore.WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel = zapcore.ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel = zapcore.DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel = zapcore.PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel = zapcore.FatalLevel +) + +// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with +// an anonymous function. +// +// It's particularly useful when splitting log output between different +// outputs (e.g., standard error and standard out). For sample code, see the +// package-level AdvancedConfiguration example. +type LevelEnablerFunc func(zapcore.Level) bool + +// Enabled calls the wrapped function. +func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } + +// An AtomicLevel is an atomically changeable, dynamic logging level. It lets +// you safely change the log level of a tree of loggers (the root logger and +// any children created by adding context) at runtime. +// +// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to +// alter its level. +// +// AtomicLevels must be created with the NewAtomicLevel constructor to allocate +// their internal atomic pointer. +type AtomicLevel struct { + l *atomic.Int32 +} + +var _ internal.LeveledEnabler = AtomicLevel{} + +// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging +// enabled. +func NewAtomicLevel() AtomicLevel { + lvl := AtomicLevel{l: new(atomic.Int32)} + lvl.l.Store(int32(InfoLevel)) + return lvl +} + +// NewAtomicLevelAt is a convenience function that creates an AtomicLevel +// and then calls SetLevel with the given level. +func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { + a := NewAtomicLevel() + a.SetLevel(l) + return a +} + +// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. +func ParseAtomicLevel(text string) (AtomicLevel, error) { + a := NewAtomicLevel() + l, err := zapcore.ParseLevel(text) + if err != nil { + return a, err + } + + a.SetLevel(l) + return a, nil +} + +// Enabled implements the zapcore.LevelEnabler interface, which allows the +// AtomicLevel to be used in place of traditional static levels. +func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { + return lvl.Level().Enabled(l) +} + +// Level returns the minimum enabled log level. 
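+//
+// Because the level is read atomically, it can be changed while the logger is
+// in use, e.g. (a sketch; enc and ws stand in for an encoder and a
+// WriteSyncer):
+//
+//	alvl := zap.NewAtomicLevelAt(zap.InfoLevel)
+//	core := zapcore.NewCore(enc, ws, alvl)
+//	alvl.SetLevel(zap.DebugLevel) // takes effect immediately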
+func (lvl AtomicLevel) Level() zapcore.Level { + return zapcore.Level(int8(lvl.l.Load())) +} + +// SetLevel alters the logging level. +func (lvl AtomicLevel) SetLevel(l zapcore.Level) { + lvl.l.Store(int32(l)) +} + +// String returns the string representation of the underlying Level. +func (lvl AtomicLevel) String() string { + return lvl.Level().String() +} + +// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text +// representations as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl *AtomicLevel) UnmarshalText(text []byte) error { + if lvl.l == nil { + lvl.l = &atomic.Int32{} + } + + var l zapcore.Level + if err := l.UnmarshalText(text); err != nil { + return err + } + + lvl.SetLevel(l) + return nil +} + +// MarshalText marshals the AtomicLevel to a byte slice. It uses the same +// text representation as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl AtomicLevel) MarshalText() (text []byte, err error) { + return lvl.Level().MarshalText() +} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go new file mode 100644 index 0000000000..6205fe48a6 --- /dev/null +++ b/vendor/go.uber.org/zap/logger.go @@ -0,0 +1,432 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io" + "os" + "strings" + + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/stacktrace" + "go.uber.org/zap/zapcore" +) + +// A Logger provides fast, leveled, structured logging. All methods are safe +// for concurrent use. +// +// The Logger is designed for contexts in which every microsecond and every +// allocation matters, so its API intentionally favors performance and type +// safety over brevity. For most applications, the SugaredLogger strikes a +// better balance between performance and ergonomics. +type Logger struct { + core zapcore.Core + + development bool + addCaller bool + onFatal zapcore.CheckWriteHook // default is WriteThenFatal + + name string + errorOutput zapcore.WriteSyncer + + addStack zapcore.LevelEnabler + + callerSkip int + + clock zapcore.Clock +} + +// New constructs a new Logger from the provided zapcore.Core and Options. If +// the passed zapcore.Core is nil, it falls back to using a no-op +// implementation. +// +// This is the most flexible way to construct a Logger, but also the most +// verbose. 
For typical use cases, the highly-opinionated presets +// (NewProduction, NewDevelopment, and NewExample) or the Config struct are +// more convenient. +// +// For sample code, see the package-level AdvancedConfiguration example. +func New(core zapcore.Core, options ...Option) *Logger { + if core == nil { + return NewNop() + } + log := &Logger{ + core: core, + errorOutput: zapcore.Lock(os.Stderr), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } + return log.WithOptions(options...) +} + +// NewNop returns a no-op Logger. It never writes out logs or internal errors, +// and it never runs user-defined hooks. +// +// Using WithOptions to replace the Core or error output of a no-op Logger can +// re-enable logging. +func NewNop() *Logger { + return &Logger{ + core: zapcore.NewNopCore(), + errorOutput: zapcore.AddSync(io.Discard), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } +} + +// NewProduction builds a sensible production Logger that writes InfoLevel and +// above logs to standard error as JSON. +// +// It's a shortcut for NewProductionConfig().Build(...Option). +func NewProduction(options ...Option) (*Logger, error) { + return NewProductionConfig().Build(options...) +} + +// NewDevelopment builds a development Logger that writes DebugLevel and above +// logs to standard error in a human-friendly format. +// +// It's a shortcut for NewDevelopmentConfig().Build(...Option). +func NewDevelopment(options ...Option) (*Logger, error) { + return NewDevelopmentConfig().Build(options...) +} + +// Must is a helper that wraps a call to a function returning (*Logger, error) +// and panics if the error is non-nil. It is intended for use in variable +// initialization such as: +// +// var logger = zap.Must(zap.NewProduction()) +func Must(logger *Logger, err error) *Logger { + if err != nil { + panic(err) + } + + return logger +} + +// NewExample builds a Logger that's designed for use in zap's testable +// examples. It writes DebugLevel and above logs to standard out as JSON, but +// omits the timestamp and calling function to keep example output +// short and deterministic. +func NewExample(options ...Option) *Logger { + encoderCfg := zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "logger", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) + return New(core).WithOptions(options...) +} + +// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, +// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a +// single application to use both Loggers and SugaredLoggers, converting +// between them on the boundaries of performance-sensitive code. +func (log *Logger) Sugar() *SugaredLogger { + core := log.clone() + core.callerSkip += 2 + return &SugaredLogger{core} +} + +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. By default, Loggers are unnamed. +func (log *Logger) Named(s string) *Logger { + if s == "" { + return log + } + l := log.clone() + if log.name == "" { + l.name = s + } else { + l.name = strings.Join([]string{l.name, s}, ".") + } + return l +} + +// WithOptions clones the current Logger, applies the supplied Options, and +// returns the resulting Logger. It's safe to use concurrently. 
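+//
+// For example (a sketch; the fields are illustrative):
+//
+//	wrapped := logger.WithOptions(
+//		zap.AddCallerSkip(1),
+//		zap.Fields(zap.String("component", "db")),
+//	)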
+func (log *Logger) WithOptions(opts ...Option) *Logger { + c := log.clone() + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// With creates a child logger and adds structured context to it. Fields added +// to the child don't affect the parent, and vice versa. Any fields that +// require evaluation (such as Objects) are evaluated upon invocation of With. +func (log *Logger) With(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + l := log.clone() + l.core = l.core.With(fields) + return l +} + +// WithLazy creates a child logger and adds structured context to it lazily. +// +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// WithLazy provides a worthwhile performance optimization for contextual loggers +// when the likelihood of using the child logger is low, +// such as error paths and rarely taken branches. +// +// Similar to [With], fields added to the child don't affect the parent, and vice versa. +func (log *Logger) WithLazy(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewLazyWith(core, fields) + })) +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (log *Logger) Level() zapcore.Level { + return zapcore.LevelOf(log.core) +} + +// Check returns a CheckedEntry if logging a message at the specified level +// is enabled. It's a completely optional optimization; in high-performance +// applications, Check can help avoid allocating a slice to hold fields. +func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + return log.check(lvl, msg) +} + +// Log logs a message at the specified level. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// Any Fields that require evaluation (such as Objects) are evaluated upon +// invocation of Log. +func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) { + if ce := log.check(lvl, msg); ce != nil { + ce.Write(fields...) + } +} + +// Debug logs a message at DebugLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Debug(msg string, fields ...Field) { + if ce := log.check(DebugLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Info logs a message at InfoLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Info(msg string, fields ...Field) { + if ce := log.check(InfoLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Warn logs a message at WarnLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Warn(msg string, fields ...Field) { + if ce := log.check(WarnLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Error logs a message at ErrorLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Error(msg string, fields ...Field) { + if ce := log.check(ErrorLevel, msg); ce != nil { + ce.Write(fields...) 
+ } +} + +// DPanic logs a message at DPanicLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// If the logger is in development mode, it then panics (DPanic means +// "development panic"). This is useful for catching errors that are +// recoverable, but shouldn't ever happen. +func (log *Logger) DPanic(msg string, fields ...Field) { + if ce := log.check(DPanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Panic logs a message at PanicLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then panics, even if logging at PanicLevel is disabled. +func (log *Logger) Panic(msg string, fields ...Field) { + if ce := log.check(PanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Fatal logs a message at FatalLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then calls os.Exit(1), even if logging at FatalLevel is +// disabled. +func (log *Logger) Fatal(msg string, fields ...Field) { + if ce := log.check(FatalLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Sync calls the underlying Core's Sync method, flushing any buffered log +// entries. Applications should take care to call Sync before exiting. +func (log *Logger) Sync() error { + return log.core.Sync() +} + +// Core returns the Logger's underlying zapcore.Core. +func (log *Logger) Core() zapcore.Core { + return log.core +} + +// Name returns the Logger's underlying name, +// or an empty string if the logger is unnamed. +func (log *Logger) Name() string { + return log.name +} + +func (log *Logger) clone() *Logger { + clone := *log + return &clone +} + +func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + // Logger.check must always be called directly by a method in the + // Logger interface (e.g., Check, Info, Fatal). + // This skips Logger.check and the Info/Fatal/Check/etc. method that + // called it. + const callerSkipOffset = 2 + + // Check the level first to reduce the cost of disabled log calls. + // Since Panic and higher may exit, we skip the optimization for those levels. + if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) { + return nil + } + + // Create basic checked entry thru the core; this will be non-nil if the + // log message will actually be written somewhere. + ent := zapcore.Entry{ + LoggerName: log.name, + Time: log.clock.Now(), + Level: lvl, + Message: msg, + } + ce := log.core.Check(ent, nil) + willWrite := ce != nil + + // Set up any required terminal behavior. + switch ent.Level { + case zapcore.PanicLevel: + ce = ce.After(ent, zapcore.WriteThenPanic) + case zapcore.FatalLevel: + onFatal := log.onFatal + // nil or WriteThenNoop will lead to continued execution after + // a Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the + // log.Fatal. 
+ if onFatal == nil || onFatal == zapcore.WriteThenNoop { + onFatal = zapcore.WriteThenFatal + } + ce = ce.After(ent, onFatal) + case zapcore.DPanicLevel: + if log.development { + ce = ce.After(ent, zapcore.WriteThenPanic) + } + } + + // Only do further annotation if we're going to write this message; checked + // entries that exist only for terminal behavior don't benefit from + // annotation. + if !willWrite { + return ce + } + + // Thread the error output through to the CheckedEntry. + ce.ErrorOutput = log.errorOutput + + addStack := log.addStack.Enabled(ce.Level) + if !log.addCaller && !addStack { + return ce + } + + // Adding the caller or stack trace requires capturing the callers of + // this function. We'll share information between these two. + stackDepth := stacktrace.First + if addStack { + stackDepth = stacktrace.Full + } + stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth) + defer stack.Free() + + if stack.Count() == 0 { + if log.addCaller { + fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) + _ = log.errorOutput.Sync() + } + return ce + } + + frame, more := stack.Next() + + if log.addCaller { + ce.Caller = zapcore.EntryCaller{ + Defined: frame.PC != 0, + PC: frame.PC, + File: frame.File, + Line: frame.Line, + Function: frame.Function, + } + } + + if addStack { + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := stacktrace.NewFormatter(buffer) + + // We've already extracted the first frame, so format that + // separately and defer to stackfmt for the rest. + stackfmt.FormatFrame(frame) + if more { + stackfmt.FormatStack(stack) + } + ce.Stack = buffer.String() + } + + return ce +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go new file mode 100644 index 0000000000..c4f3bca3d2 --- /dev/null +++ b/vendor/go.uber.org/zap/options.go @@ -0,0 +1,167 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" +) + +// An Option configures a Logger. +type Option interface { + apply(*Logger) +} + +// optionFunc wraps a func so it satisfies the Option interface. +type optionFunc func(*Logger) + +func (f optionFunc) apply(log *Logger) { + f(log) +} + +// WrapCore wraps or replaces the Logger's underlying zapcore.Core. 
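+//
+// For example, sampling can be layered onto an existing logger like this
+// (a sketch):
+//
+//	logger = logger.WithOptions(zap.WrapCore(func(c zapcore.Core) zapcore.Core {
+//		return zapcore.NewSamplerWithOptions(c, time.Second, 100, 10)
+//	}))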
+func WrapCore(f func(zapcore.Core) zapcore.Core) Option {
+	return optionFunc(func(log *Logger) {
+		log.core = f(log.core)
+	})
+}
+
+// Hooks registers functions which will be called each time the Logger writes
+// out an Entry. Repeated use of Hooks is additive.
+//
+// Hooks are useful for simple side effects, like capturing metrics for the
+// number of emitted logs. More complex side effects, including anything that
+// requires access to the Entry's structured fields, should be implemented as
+// a zapcore.Core instead. See zapcore.RegisterHooks for details.
+func Hooks(hooks ...func(zapcore.Entry) error) Option {
+	return optionFunc(func(log *Logger) {
+		log.core = zapcore.RegisterHooks(log.core, hooks...)
+	})
+}
+
+// Fields adds fields to the Logger.
+func Fields(fs ...Field) Option {
+	return optionFunc(func(log *Logger) {
+		log.core = log.core.With(fs)
+	})
+}
+
+// ErrorOutput sets the destination for errors generated by the Logger. Note
+// that this option only affects internal errors; for sample code that sends
+// error-level logs to a different location from info- and debug-level logs,
+// see the package-level AdvancedConfiguration example.
+//
+// The supplied WriteSyncer must be safe for concurrent use. The Open and
+// zapcore.Lock functions are the simplest ways to protect files with a mutex.
+func ErrorOutput(w zapcore.WriteSyncer) Option {
+	return optionFunc(func(log *Logger) {
+		log.errorOutput = w
+	})
+}
+
+// Development puts the logger in development mode, which makes DPanic-level
+// logs panic instead of simply logging an error.
+func Development() Option {
+	return optionFunc(func(log *Logger) {
+		log.development = true
+	})
+}
+
+// AddCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller. See also WithCaller.
+func AddCaller() Option {
+	return WithCaller(true)
+}
+
+// WithCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller, or not, depending on the
+// value of enabled. This is a generalized form of AddCaller.
+func WithCaller(enabled bool) Option {
+	return optionFunc(func(log *Logger) {
+		log.addCaller = enabled
+	})
+}
+
+// AddCallerSkip increases the number of callers skipped by caller annotation
+// (as enabled by the AddCaller option). When building wrappers around the
+// Logger and SugaredLogger, supplying this Option prevents zap from always
+// reporting the wrapper code as the caller.
+func AddCallerSkip(skip int) Option {
+	return optionFunc(func(log *Logger) {
+		log.callerSkip += skip
+	})
+}
+
+// AddStacktrace configures the Logger to record a stack trace for all messages at
+// or above a given level.
+func AddStacktrace(lvl zapcore.LevelEnabler) Option {
+	return optionFunc(func(log *Logger) {
+		log.addStack = lvl
+	})
+}
+
+// IncreaseLevel increases the level of the logger. It has no effect if
+// the passed in level tries to decrease the level of the logger.
+func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
+	return optionFunc(func(log *Logger) {
+		core, err := zapcore.NewIncreaseLevelCore(log.core, lvl)
+		if err != nil {
+			fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err)
+		} else {
+			log.core = core
+		}
+	})
+}
+
+// OnFatal sets the action to take on fatal logs.
+//
+// Deprecated: Use [WithFatalHook] instead.
+func OnFatal(action zapcore.CheckWriteAction) Option {
+	return WithFatalHook(action)
+}
+
+// WithFatalHook sets a CheckWriteHook to run on fatal logs.
+// Zap will call this hook after writing a log statement with a Fatal level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a fatal log message, but it will not exit the
+// program.
+//
+//	zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
+//
+// It is important that the provided CheckWriteHook stops the control flow at
+// the current statement to meet expectations of callers of the logger.
+// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
+// minimum.
+func WithFatalHook(hook zapcore.CheckWriteHook) Option {
+	return optionFunc(func(log *Logger) {
+		log.onFatal = hook
+	})
+}
+
+// WithClock specifies the clock used by the logger to determine the current
+// time for logged entries. Defaults to the system clock with time.Now.
+func WithClock(clock zapcore.Clock) Option {
+	return optionFunc(func(log *Logger) {
+		log.clock = clock
+	})
+}
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
new file mode 100644
index 0000000000..499772a00d
--- /dev/null
+++ b/vendor/go.uber.org/zap/sink.go
@@ -0,0 +1,180 @@
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"go.uber.org/zap/zapcore"
+)
+
+const schemeFile = "file"
+
+var _sinkRegistry = newSinkRegistry()
+
+// Sink defines the interface to write to and close logger destinations.
+type Sink interface {
+	zapcore.WriteSyncer
+	io.Closer
+}
+
+type errSinkNotFound struct {
+	scheme string
+}
+
+func (e *errSinkNotFound) Error() string {
+	return fmt.Sprintf("no sink found for scheme %q", e.scheme)
+}
+
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type sinkRegistry struct {
+	mu        sync.Mutex
+	factories map[string]func(*url.URL) (Sink, error)          // keyed by scheme
+	openFile  func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
+}
+
+func newSinkRegistry() *sinkRegistry {
+	sr := &sinkRegistry{
+		factories: make(map[string]func(*url.URL) (Sink, error)),
+		openFile:  os.OpenFile,
+	}
+	// Infallible operation: the registry is empty, so we can't have a conflict.
+	_ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+	return sr
+}
+
+// RegisterSink registers the given factory for the given scheme.
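+//
+// The exported RegisterSink below delegates here; a custom scheme can be
+// wired up like so (a sketch; newMemorySink is hypothetical):
+//
+//	zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
+//		return newMemorySink(), nil
+//	})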
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+	sr.mu.Lock()
+	defer sr.mu.Unlock()
+
+	if scheme == "" {
+		return errors.New("can't register a sink factory for empty string")
+	}
+	normalized, err := normalizeScheme(scheme)
+	if err != nil {
+		return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
+	}
+	if _, ok := sr.factories[normalized]; ok {
+		return fmt.Errorf("sink factory already registered for scheme %q", normalized)
+	}
+	sr.factories[normalized] = factory
+	return nil
+}
+
+func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
+	// URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
+	// the drive, and path is unset unless `c:/log.txt` is used.
+	// To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
+	// filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
+	if filepath.IsAbs(rawURL) {
+		return sr.newFileSinkFromPath(rawURL)
+	}
+
+	u, err := url.Parse(rawURL)
+	if err != nil {
+		return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
+	}
+	if u.Scheme == "" {
+		u.Scheme = schemeFile
+	}
+
+	sr.mu.Lock()
+	factory, ok := sr.factories[u.Scheme]
+	sr.mu.Unlock()
+	if !ok {
+		return nil, &errSinkNotFound{u.Scheme}
+	}
+	return factory(u)
+}
+
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+	return _sinkRegistry.RegisterSink(scheme, factory)
+}
+
+func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
+	if u.User != nil {
+		return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
+	}
+	if u.Fragment != "" {
+		return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u)
+	}
+	if u.RawQuery != "" {
+		return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u)
+	}
+	// Error messages are better if we check hostname and port separately.
+	if u.Port() != "" {
+		return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u)
+	}
+	if hn := u.Hostname(); hn != "" && hn != "localhost" {
+		return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
+	}
+
+	return sr.newFileSinkFromPath(u.Path)
+}
+
+func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
+	switch path {
+	case "stdout":
+		return nopCloserSink{os.Stdout}, nil
+	case "stderr":
+		return nopCloserSink{os.Stderr}, nil
+	}
+	return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666)
+}
+
+func normalizeScheme(s string) (string, error) {
+	// https://tools.ietf.org/html/rfc3986#section-3.1
+	s = strings.ToLower(s)
+	if first := s[0]; 'a' > first || 'z' < first {
+		return "", errors.New("must start with a letter")
+	}
+	for i := 1; i < len(s); i++ { // iterate over bytes, not runes
+		c := s[i]
+		switch {
+		case 'a' <= c && c <= 'z':
+			continue
+		case '0' <= c && c <= '9':
+			continue
+		case c == '.'
|| c == '+' || c == '-': + continue + } + return "", fmt.Errorf("may not contain %q", c) + } + return s, nil +} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go new file mode 100644 index 0000000000..00ac5fe3ac --- /dev/null +++ b/vendor/go.uber.org/zap/sugar.go @@ -0,0 +1,437 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +const ( + _oddNumberErrMsg = "Ignored key without a value." + _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." + _multipleErrMsg = "Multiple errors without a key." +) + +// A SugaredLogger wraps the base Logger functionality in a slower, but less +// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar +// method. +// +// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. +// For each log level, it exposes four methods: +// +// - methods named after the log level for log.Print-style logging +// - methods ending in "w" for loosely-typed structured logging +// - methods ending in "f" for log.Printf-style logging +// - methods ending in "ln" for log.Println-style logging +// +// For example, the methods for InfoLevel are: +// +// Info(...any) Print-style logging +// Infow(...any) Structured logging (read as "info with") +// Infof(string, ...any) Printf-style logging +// Infoln(...any) Println-style logging +type SugaredLogger struct { + base *Logger +} + +// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring +// is quite inexpensive, so it's reasonable for a single application to use +// both Loggers and SugaredLoggers, converting between them on the boundaries +// of performance-sensitive code. +func (s *SugaredLogger) Desugar() *Logger { + base := s.base.clone() + base.callerSkip -= 2 + return base +} + +// Named adds a sub-scope to the logger's name. See Logger.Named for details. +func (s *SugaredLogger) Named(name string) *SugaredLogger { + return &SugaredLogger{base: s.base.Named(name)} +} + +// WithOptions clones the current SugaredLogger, applies the supplied Options, +// and returns the result. It's safe to use concurrently. +func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger { + base := s.base.clone() + for _, opt := range opts { + opt.apply(base) + } + return &SugaredLogger{base: base} +} + +// With adds a variadic number of fields to the logging context. 
It accepts a +// mix of strongly-typed Field objects and loosely-typed key-value pairs. When +// processing pairs, the first element of the pair is used as the field key +// and the second as the field value. +// +// For example, +// +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// +// is the equivalent of +// +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) +// +// Note that the keys in key-value pairs should be strings. In development, +// passing a non-string key panics. In production, the logger is more +// forgiving: a separate error is logged, but the key-value pair is skipped +// and execution continues. Passing an orphaned key triggers similar behavior: +// panics in development and errors in production. +func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (s *SugaredLogger) Level() zapcore.Level { + return zapcore.LevelOf(s.base.core) +} + +// Debug logs the provided arguments at [DebugLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Debug(args ...interface{}) { + s.log(DebugLevel, "", args, nil) +} + +// Info logs the provided arguments at [InfoLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Info(args ...interface{}) { + s.log(InfoLevel, "", args, nil) +} + +// Warn logs the provided arguments at [WarnLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Warn(args ...interface{}) { + s.log(WarnLevel, "", args, nil) +} + +// Error logs the provided arguments at [ErrorLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Error(args ...interface{}) { + s.log(ErrorLevel, "", args, nil) +} + +// DPanic logs the provided arguments at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) DPanic(args ...interface{}) { + s.log(DPanicLevel, "", args, nil) +} + +// Panic constructs a message with the provided arguments and panics. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Panic(args ...interface{}) { + s.log(PanicLevel, "", args, nil) +} + +// Fatal constructs a message with the provided arguments and calls os.Exit. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Fatal(args ...interface{}) { + s.log(FatalLevel, "", args, nil) +} + +// Debugf formats the message according to the format specifier +// and logs it at [DebugLevel]. +func (s *SugaredLogger) Debugf(template string, args ...interface{}) { + s.log(DebugLevel, template, args, nil) +} + +// Infof formats the message according to the format specifier +// and logs it at [InfoLevel]. +func (s *SugaredLogger) Infof(template string, args ...interface{}) { + s.log(InfoLevel, template, args, nil) +} + +// Warnf formats the message according to the format specifier +// and logs it at [WarnLevel]. 
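+//
+// For example (a sketch):
+//
+//	sugar.Warnf("retrying in %v (attempt %d)", delay, attempt)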
+func (s *SugaredLogger) Warnf(template string, args ...interface{}) { + s.log(WarnLevel, template, args, nil) +} + +// Errorf formats the message according to the format specifier +// and logs it at [ErrorLevel]. +func (s *SugaredLogger) Errorf(template string, args ...interface{}) { + s.log(ErrorLevel, template, args, nil) +} + +// DPanicf formats the message according to the format specifier +// and logs it at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { + s.log(DPanicLevel, template, args, nil) +} + +// Panicf formats the message according to the format specifier +// and panics. +func (s *SugaredLogger) Panicf(template string, args ...interface{}) { + s.log(PanicLevel, template, args, nil) +} + +// Fatalf formats the message according to the format specifier +// and calls os.Exit. +func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { + s.log(FatalLevel, template, args, nil) +} + +// Debugw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +// +// When debug-level logging is disabled, this is much faster than +// +// s.With(keysAndValues).Debug(msg) +func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.log(DebugLevel, msg, nil, keysAndValues) +} + +// Infow logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { + s.log(InfoLevel, msg, nil, keysAndValues) +} + +// Warnw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.log(WarnLevel, msg, nil, keysAndValues) +} + +// Errorw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { + s.log(ErrorLevel, msg, nil, keysAndValues) +} + +// DPanicw logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { + s.log(DPanicLevel, msg, nil, keysAndValues) +} + +// Panicw logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { + s.log(PanicLevel, msg, nil, keysAndValues) +} + +// Fatalw logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { + s.log(FatalLevel, msg, nil, keysAndValues) +} + +// Debugln logs a message at [DebugLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Debugln(args ...interface{}) { + s.logln(DebugLevel, args, nil) +} + +// Infoln logs a message at [InfoLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Infoln(args ...interface{}) { + s.logln(InfoLevel, args, nil) +} + +// Warnln logs a message at [WarnLevel]. +// Spaces are always added between arguments. 
+func (s *SugaredLogger) Warnln(args ...interface{}) { + s.logln(WarnLevel, args, nil) +} + +// Errorln logs a message at [ErrorLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Errorln(args ...interface{}) { + s.logln(ErrorLevel, args, nil) +} + +// DPanicln logs a message at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +// Spaces are always added between arguments. +func (s *SugaredLogger) DPanicln(args ...interface{}) { + s.logln(DPanicLevel, args, nil) +} + +// Panicln logs a message at [PanicLevel] and panics. +// Spaces are always added between arguments. +func (s *SugaredLogger) Panicln(args ...interface{}) { + s.logln(PanicLevel, args, nil) +} + +// Fatalln logs a message at [FatalLevel] and calls os.Exit. +// Spaces are always added between arguments. +func (s *SugaredLogger) Fatalln(args ...interface{}) { + s.logln(FatalLevel, args, nil) +} + +// Sync flushes any buffered log entries. +func (s *SugaredLogger) Sync() error { + return s.base.Sync() +} + +// log message with Sprint, Sprintf, or neither. +func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { + // If logging at this level is completely disabled, skip the overhead of + // string formatting. + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + msg := getMessage(template, fmtArgs) + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +// logln message with Sprintln +func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) { + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + msg := getMessageln(fmtArgs) + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +// getMessage format with Sprint, Sprintf, or neither. +func getMessage(template string, fmtArgs []interface{}) string { + if len(fmtArgs) == 0 { + return template + } + + if template != "" { + return fmt.Sprintf(template, fmtArgs...) + } + + if len(fmtArgs) == 1 { + if str, ok := fmtArgs[0].(string); ok { + return str + } + } + return fmt.Sprint(fmtArgs...) +} + +// getMessageln format with Sprintln. +func getMessageln(fmtArgs []interface{}) string { + msg := fmt.Sprintln(fmtArgs...) + return msg[:len(msg)-1] +} + +func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { + if len(args) == 0 { + return nil + } + + var ( + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields = make([]Field, 0, len(args)) + invalid invalidPairs + seenError bool + ) + + for i := 0; i < len(args); { + // This is a strongly-typed field. Consume it and move on. + if f, ok := args[i].(Field); ok { + fields = append(fields, f) + i++ + continue + } + + // If it is an error, consume it and move on. + if err, ok := args[i].(error); ok { + if !seenError { + seenError = true + fields = append(fields, Error(err)) + } else { + s.base.Error(_multipleErrMsg, Error(err)) + } + i++ + continue + } + + // Make sure this element isn't a dangling key. + if i == len(args)-1 { + s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) + break + } + + // Consume this value and the next, treating them as a key-value pair. If the + // key isn't a string, add this pair to the slice of invalid pairs. 
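	// Illustrative sketch, not vendored code: given
	//
	//	sugar.Infow("listen", "port", 8080, zap.String("proto", "tcp"), "extra")
	//
	// the typed "proto" field is consumed by the Field branch above, the
	// ("port", 8080) pair is matched by the pairing step below, and the
	// dangling "extra" key trips the dangling-key check on the final
	// iteration.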
+ key, val := args[i], args[i+1] + if keyStr, ok := key.(string); !ok { + // Subsequent errors are likely, so allocate once up front. + if cap(invalid) == 0 { + invalid = make(invalidPairs, 0, len(args)/2) + } + invalid = append(invalid, invalidPair{i, key, val}) + } else { + fields = append(fields, Any(keyStr, val)) + } + i += 2 + } + + // If we encountered any invalid key-value pairs, log an error. + if len(invalid) > 0 { + s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid)) + } + return fields +} + +type invalidPair struct { + position int + key, value interface{} +} + +func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt64("position", int64(p.position)) + Any("key", p.key).AddTo(enc) + Any("value", p.value).AddTo(enc) + return nil +} + +type invalidPairs []invalidPair + +func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { + var err error + for i := range ps { + err = multierr.Append(err, enc.AppendObject(ps[i])) + } + return err +} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go new file mode 100644 index 0000000000..c5a1f16225 --- /dev/null +++ b/vendor/go.uber.org/zap/time.go @@ -0,0 +1,27 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "time" + +func timeToMillis(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond) +} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go new file mode 100644 index 0000000000..06768c6791 --- /dev/null +++ b/vendor/go.uber.org/zap/writer.go @@ -0,0 +1,98 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +// Open is a high-level wrapper that takes a variadic number of URLs, opens or +// creates each of the specified resources, and combines them into a locked +// WriteSyncer. It also returns any error encountered and a function to close +// any opened files. +// +// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a +// scheme and URLs with the "file" scheme. Third-party code may register +// factories for other schemes using RegisterSink. +// +// URLs with the "file" scheme must use absolute paths on the local +// filesystem. No user, password, port, fragments, or query parameters are +// allowed, and the hostname must be empty or "localhost". +// +// Since it's common to write logs to the local filesystem, URLs without a +// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without +// a scheme, the special paths "stdout" and "stderr" are interpreted as +// os.Stdout and os.Stderr. When specified without a scheme, relative file +// paths also work. +func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { + writers, closeAll, err := open(paths) + if err != nil { + return nil, nil, err + } + + writer := CombineWriteSyncers(writers...) + return writer, closeAll, nil +} + +func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { + writers := make([]zapcore.WriteSyncer, 0, len(paths)) + closers := make([]io.Closer, 0, len(paths)) + closeAll := func() { + for _, c := range closers { + _ = c.Close() + } + } + + var openErr error + for _, path := range paths { + sink, err := _sinkRegistry.newSink(path) + if err != nil { + openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err)) + continue + } + writers = append(writers, sink) + closers = append(closers, sink) + } + if openErr != nil { + closeAll() + return nil, nil, openErr + } + + return writers, closeAll, nil +} + +// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a +// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op +// WriteSyncer. +// +// It's provided purely as a convenience; the result is no different from +// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. +func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { + if len(writers) == 0 { + return zapcore.AddSync(io.Discard) + } + return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) +} diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go new file mode 100644 index 0000000000..a40e93b3ec --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -0,0 +1,219 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"bufio"
+	"sync"
+	"time"
+
+	"go.uber.org/multierr"
+)
+
+const (
+	// _defaultBufferSize specifies the default size used by Buffer.
+	_defaultBufferSize = 256 * 1024 // 256 kB
+
+	// _defaultFlushInterval specifies the default flush interval for
+	// Buffer.
+	_defaultFlushInterval = 30 * time.Second
+)
+
+// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before
+// flushing them to a wrapped WriteSyncer after reaching some limit, or at some
+// fixed interval, whichever comes first.
+//
+// BufferedWriteSyncer is safe for concurrent use. You don't need to use
+// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
+//
+// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
+// destination (*os.File is a valid WriteSyncer), wrap it with
+// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
+// object.
+//
+//	func main() {
+//		ws := ... // your log destination
+//		bws := &zapcore.BufferedWriteSyncer{WS: ws}
+//		defer bws.Stop()
+//
+//		// ...
+//		core := zapcore.NewCore(enc, bws, lvl)
+//		logger := zap.New(core)
+//
+//		// ...
+//	}
+//
+// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
+// waiting at most 30 seconds between flushes.
+// You can customize these parameters by setting the Size or FlushInterval
+// fields.
+// For example, the following buffers up to 512 kB of logs before flushing them
+// to Stderr, with a maximum of one minute between each flush.
+//
+//	ws := &BufferedWriteSyncer{
+//		WS:            os.Stderr,
+//		Size:          512 * 1024, // 512 kB
+//		FlushInterval: time.Minute,
+//	}
+//	defer ws.Stop()
+type BufferedWriteSyncer struct {
+	// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
+	// writes.
+	//
+	// This field is required.
+	WS WriteSyncer
+
+	// Size specifies the maximum amount of data the writer will buffer
+	// before flushing.
+	//
+	// Defaults to 256 kB if unspecified.
+	Size int
+
+	// FlushInterval specifies how often the writer should flush data if
+	// there have been no writes.
+	//
+	// Defaults to 30 seconds if unspecified.
+	FlushInterval time.Duration
+
+	// Clock, if specified, provides control of the source of time for the
+	// writer.
+	//
+	// Defaults to the system clock.
+	Clock Clock
+
+	// unexported fields for state
+	mu          sync.Mutex
+	initialized bool // whether initialize() has run
+	stopped     bool // whether Stop() has run
+	writer      *bufio.Writer
+	ticker      *time.Ticker
+	stop        chan struct{} // closed when flushLoop should stop
+	done        chan struct{} // closed when flushLoop has stopped
+}
+
+func (s *BufferedWriteSyncer) initialize() {
+	size := s.Size
+	if size == 0 {
+		size = _defaultBufferSize
+	}
+
+	flushInterval := s.FlushInterval
+	if flushInterval == 0 {
+		flushInterval = _defaultFlushInterval
+	}
+
+	if s.Clock == nil {
+		s.Clock = DefaultClock
+	}
+
+	s.ticker = s.Clock.NewTicker(flushInterval)
+	s.writer = bufio.NewWriterSize(s.WS, size)
+	s.stop = make(chan struct{})
+	s.done = make(chan struct{})
+	s.initialized = true
+	go s.flushLoop()
+}
+
+// Write writes log data into the buffer directly. Multiple Write calls are
+// batched, and buffered data is flushed to the underlying WriteSyncer when the
+// buffer fills up or when the flush interval elapses.
+func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if !s.initialized {
+		s.initialize()
+	}
+
+	// To avoid partial writes from being flushed, we manually flush the existing buffer if:
+	// * The current write doesn't fit into the buffer fully, and
+	// * The buffer is not empty (since bufio will not split large writes when the buffer is empty)
+	if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 {
+		if err := s.writer.Flush(); err != nil {
+			return 0, err
+		}
+	}
+
+	return s.writer.Write(bs)
+}
+
+// Sync flushes any buffered log data to the underlying WriteSyncer and then
+// syncs it.
+func (s *BufferedWriteSyncer) Sync() error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	var err error
+	if s.initialized {
+		err = s.writer.Flush()
+	}
+
+	return multierr.Append(err, s.WS.Sync())
+}
+
+// flushLoop flushes the buffer at the configured interval until Stop is
+// called.
+func (s *BufferedWriteSyncer) flushLoop() {
+	defer close(s.done)
+
+	for {
+		select {
+		case <-s.ticker.C:
+			// Errors are deliberately ignored here: bufio.Writer remembers a
+			// write error, and Sync() surfaces it on a later call, including
+			// the final Sync() during Stop.
+			_ = s.Sync()
+		case <-s.stop:
+			return
+		}
+	}
+}
+
+// Stop closes the buffer, cleans up background goroutines, and flushes
+// remaining unwritten data.
+func (s *BufferedWriteSyncer) Stop() (err error) {
+	var stopped bool
+
+	// Critical section.
+	func() {
+		s.mu.Lock()
+		defer s.mu.Unlock()
+
+		if !s.initialized {
+			return
+		}
+
+		stopped = s.stopped
+		if stopped {
+			return
+		}
+		s.stopped = true
+
+		s.ticker.Stop()
+		close(s.stop) // tell flushLoop to stop
+		<-s.done      // and wait until it has
+	}()
+
+	// Don't call Sync on consecutive Stops.
+	if !stopped {
+		err = s.Sync()
+	}
+
+	return err
+}
diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go
new file mode 100644
index 0000000000..422fd82a6b
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/clock.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// DefaultClock is the default clock used by Zap in operations that require +// time. This clock uses the system clock for all operations. +var DefaultClock = systemClock{} + +// Clock is a source of time for logged entries. +type Clock interface { + // Now returns the current local time. + Now() time.Time + + // NewTicker returns *time.Ticker that holds a channel + // that delivers "ticks" of a clock. + NewTicker(time.Duration) *time.Ticker +} + +// systemClock implements default Clock that uses system time. +type systemClock struct{} + +func (systemClock) Now() time.Time { + return time.Now() +} + +func (systemClock) NewTicker(duration time.Duration) *time.Ticker { + return time.NewTicker(duration) +} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go new file mode 100644 index 0000000000..8ca0bfaf56 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -0,0 +1,157 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
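Because Clock is a plain interface, tests can freeze Now while keeping real tickers. A sketch: the fixedClock type is mine, and zap.WithClock is the logger option for supplying a custom Clock:

package main

import (
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// fixedClock satisfies zapcore.Clock but always reports the same instant,
// which makes encoded timestamps deterministic in tests.
type fixedClock struct{ t time.Time }

var _ zapcore.Clock = fixedClock{} // compile-time interface check

func (c fixedClock) Now() time.Time { return c.t }

// Tickers stay real; only Now is frozen.
func (c fixedClock) NewTicker(d time.Duration) *time.Ticker { return time.NewTicker(d) }

func main() {
	frozen := fixedClock{t: time.Date(2024, 2, 16, 0, 0, 0, 0, time.UTC)}
	logger, _ := zap.NewProduction(zap.WithClock(frozen))
	defer logger.Sync()
	logger.Info("every entry carries the frozen timestamp")
}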
+ +package zapcore + +import ( + "fmt" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" +) + +var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder { + return &sliceArrayEncoder{ + elems: make([]interface{}, 0, 2), + } +}) + +func getSliceEncoder() *sliceArrayEncoder { + return _sliceEncoderPool.Get() +} + +func putSliceEncoder(e *sliceArrayEncoder) { + e.elems = e.elems[:0] + _sliceEncoderPool.Put(e) +} + +type consoleEncoder struct { + *jsonEncoder +} + +// NewConsoleEncoder creates an encoder whose output is designed for human - +// rather than machine - consumption. It serializes the core log entry data +// (message, level, timestamp, etc.) in a plain-text format and leaves the +// structured context as JSON. +// +// Note that although the console encoder doesn't use the keys specified in the +// encoder configuration, it will omit any element whose key is set to the empty +// string. +func NewConsoleEncoder(cfg EncoderConfig) Encoder { + if cfg.ConsoleSeparator == "" { + // Use a default delimiter of '\t' for backwards compatibility + cfg.ConsoleSeparator = "\t" + } + return consoleEncoder{newJSONEncoder(cfg, true)} +} + +func (c consoleEncoder) Clone() Encoder { + return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} +} + +func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + line := bufferpool.Get() + + // We don't want the entry's metadata to be quoted and escaped (if it's + // encoded as strings), which means that we can't use the JSON encoder. The + // simplest option is to use the memory encoder and fmt.Fprint. + // + // If this ever becomes a performance bottleneck, we can implement + // ArrayEncoder for our plain-text format. + arr := getSliceEncoder() + if c.TimeKey != "" && c.EncodeTime != nil { + c.EncodeTime(ent.Time, arr) + } + if c.LevelKey != "" && c.EncodeLevel != nil { + c.EncodeLevel(ent.Level, arr) + } + if ent.LoggerName != "" && c.NameKey != "" { + nameEncoder := c.EncodeName + + if nameEncoder == nil { + // Fall back to FullNameEncoder for backward compatibility. + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, arr) + } + if ent.Caller.Defined { + if c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + if c.FunctionKey != "" { + arr.AppendString(ent.Caller.Function) + } + } + for i := range arr.elems { + if i > 0 { + line.AppendString(c.ConsoleSeparator) + } + fmt.Fprint(line, arr.elems[i]) + } + putSliceEncoder(arr) + + // Add the message itself. + if c.MessageKey != "" { + c.addSeparatorIfNecessary(line) + line.AppendString(ent.Message) + } + + // Add any structured context. + c.writeContext(line, fields) + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. + if ent.Stack != "" && c.StacktraceKey != "" { + line.AppendByte('\n') + line.AppendString(ent.Stack) + } + + line.AppendString(c.LineEnding) + return line, nil +} + +func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { + context := c.jsonEncoder.Clone().(*jsonEncoder) + defer func() { + // putJSONEncoder assumes the buffer is still used, but we write out the buffer so + // we can free it. 
+ context.buf.Free() + putJSONEncoder(context) + }() + + addFields(context, extra) + context.closeOpenNamespaces() + if context.buf.Len() == 0 { + return + } + + c.addSeparatorIfNecessary(line) + line.AppendByte('{') + line.Write(context.buf.Bytes()) + line.AppendByte('}') +} + +func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) { + if line.Len() > 0 { + line.AppendString(c.ConsoleSeparator) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go new file mode 100644 index 0000000000..776e93f6f3 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -0,0 +1,122 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// Core is a minimal, fast logger interface. It's designed for library authors +// to wrap in a more user-friendly API. +type Core interface { + LevelEnabler + + // With adds structured context to the Core. + With([]Field) Core + // Check determines whether the supplied Entry should be logged (using the + // embedded LevelEnabler and possibly some extra logic). If the entry + // should be logged, the Core adds itself to the CheckedEntry and returns + // the result. + // + // Callers must use Check before calling Write. + Check(Entry, *CheckedEntry) *CheckedEntry + // Write serializes the Entry and any Fields supplied at the log site and + // writes them to their destination. + // + // If called, Write should always log the Entry and Fields; it should not + // replicate the logic of Check. + Write(Entry, []Field) error + // Sync flushes buffered logs (if any). + Sync() error +} + +type nopCore struct{} + +// NewNopCore returns a no-op Core. +func NewNopCore() Core { return nopCore{} } +func (nopCore) Enabled(Level) bool { return false } +func (n nopCore) With([]Field) Core { return n } +func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } +func (nopCore) Write(Entry, []Field) error { return nil } +func (nopCore) Sync() error { return nil } + +// NewCore creates a Core that writes logs to a WriteSyncer. 
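NewCore is the assembly point for the three ingredients an ioCore needs: an Encoder, a WriteSyncer, and a LevelEnabler. Combined with zap.Open from writer.go earlier, a minimal end-to-end sketch (paths and level are illustrative):

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// zap.Open treats "stdout" specially and the bare path as a local file.
	ws, closeAll, err := zap.Open("stdout", "/tmp/app.log")
	if err != nil {
		panic(err)
	}
	defer closeAll()

	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	core := zapcore.NewCore(enc, ws, zapcore.InfoLevel)

	logger := zap.New(core)
	defer logger.Sync()
	logger.Info("wired up from encoder, write syncer, and level")
}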
+func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { + return &ioCore{ + LevelEnabler: enab, + enc: enc, + out: ws, + } +} + +type ioCore struct { + LevelEnabler + enc Encoder + out WriteSyncer +} + +var ( + _ Core = (*ioCore)(nil) + _ leveledEnabler = (*ioCore)(nil) +) + +func (c *ioCore) Level() Level { + return LevelOf(c.LevelEnabler) +} + +func (c *ioCore) With(fields []Field) Core { + clone := c.clone() + addFields(clone.enc, fields) + return clone +} + +func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *ioCore) Write(ent Entry, fields []Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + _, err = c.out.Write(buf.Bytes()) + buf.Free() + if err != nil { + return err + } + if ent.Level > ErrorLevel { + // Since we may be crashing the program, sync the output. + // Ignore Sync errors, pending a clean solution to issue #370. + _ = c.Sync() + } + return nil +} + +func (c *ioCore) Sync() error { + return c.out.Sync() +} + +func (c *ioCore) clone() *ioCore { + return &ioCore{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone(), + out: c.out, + } +} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go new file mode 100644 index 0000000000..31000e91f7 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zapcore defines and implements the low-level interfaces upon which +// zap is built. By providing alternate implementations of these interfaces, +// external packages can extend zap's capabilities. +package zapcore // import "go.uber.org/zap/zapcore" diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go new file mode 100644 index 0000000000..5769ff3e4e --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -0,0 +1,451 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"encoding/json"
+	"io"
+	"time"
+
+	"go.uber.org/zap/buffer"
+)
+
+// DefaultLineEnding defines the default line ending when writing logs.
+// Alternate line endings specified in EncoderConfig can override this
+// behavior.
+const DefaultLineEnding = "\n"
+
+// OmitKey defines the key to use when callers want to remove a key from log output.
+const OmitKey = ""
+
+// A LevelEncoder serializes a Level to a primitive type.
+type LevelEncoder func(Level, PrimitiveArrayEncoder)
+
+// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
+// InfoLevel is serialized to "info".
+func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	enc.AppendString(l.String())
+}
+
+// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring.
+// For example, InfoLevel is serialized to "info" and colored blue.
+func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	s, ok := _levelToLowercaseColorString[l]
+	if !ok {
+		s = _unknownLevelColor.Add(l.String())
+	}
+	enc.AppendString(s)
+}
+
+// CapitalLevelEncoder serializes a Level to an all-caps string. For example,
+// InfoLevel is serialized to "INFO".
+func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	enc.AppendString(l.CapitalString())
+}
+
+// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color.
+// For example, InfoLevel is serialized to "INFO" and colored blue.
+func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	s, ok := _levelToCapitalColorString[l]
+	if !ok {
+		s = _unknownLevelColor.Add(l.CapitalString())
+	}
+	enc.AppendString(s)
+}
+
+// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to
+// CapitalLevelEncoder, "capitalColor" is unmarshaled to CapitalColorLevelEncoder,
+// "color" is unmarshaled to LowercaseColorLevelEncoder, and anything else
+// is unmarshaled to LowercaseLevelEncoder.
+func (e *LevelEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "capital":
+		*e = CapitalLevelEncoder
+	case "capitalColor":
+		*e = CapitalColorLevelEncoder
+	case "color":
+		*e = LowercaseColorLevelEncoder
+	default:
+		*e = LowercaseLevelEncoder
+	}
+	return nil
+}
+
+// A TimeEncoder serializes a time.Time to a primitive type.
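Since a LevelEncoder is just a function value, custom formats drop straight into an EncoderConfig; a sketch before the time encoders below (the bracket style is my own invention):

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// bracketLevelEncoder renders "[INFO]" instead of "info"; any func with the
// LevelEncoder signature works.
func bracketLevelEncoder(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
	enc.AppendString("[" + l.CapitalString() + "]")
}

func main() {
	cfg := zap.NewDevelopmentEncoderConfig()
	cfg.EncodeLevel = bracketLevelEncoder

	core := zapcore.NewCore(zapcore.NewConsoleEncoder(cfg), zapcore.Lock(os.Stderr), zapcore.DebugLevel)
	zap.New(core).Info("level renders as [INFO]")
}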
+type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
+
+// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
+// since the Unix epoch.
+func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	nanos := t.UnixNano()
+	sec := float64(nanos) / float64(time.Second)
+	enc.AppendFloat64(sec)
+}
+
+// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of
+// milliseconds since the Unix epoch.
+func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	nanos := t.UnixNano()
+	millis := float64(nanos) / float64(time.Millisecond)
+	enc.AppendFloat64(millis)
+}
+
+// EpochNanosTimeEncoder serializes a time.Time to an integer number of
+// nanoseconds since the Unix epoch.
+func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	enc.AppendInt64(t.UnixNano())
+}
+
+func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) {
+	type appendTimeEncoder interface {
+		AppendTimeLayout(time.Time, string)
+	}
+
+	if enc, ok := enc.(appendTimeEncoder); ok {
+		enc.AppendTimeLayout(t, layout)
+		return
+	}
+
+	enc.AppendString(t.Format(layout))
+}
+
+// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string
+// with millisecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc)
+}
+
+// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	encodeTimeLayout(t, time.RFC3339, enc)
+}
+
+// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string
+// with nanosecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	encodeTimeLayout(t, time.RFC3339Nano, enc)
+}
+
+// TimeEncoderOfLayout returns a TimeEncoder which serializes a time.Time using
+// the given layout.
+func TimeEncoderOfLayout(layout string) TimeEncoder {
+	return func(t time.Time, enc PrimitiveArrayEncoder) {
+		encodeTimeLayout(t, layout, enc)
+	}
+}
+
+// UnmarshalText unmarshals text to a TimeEncoder.
+// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder.
+// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder.
+// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder.
+// "millis" is unmarshaled to EpochMillisTimeEncoder.
+// "nanos" is unmarshaled to EpochNanosTimeEncoder.
+// Anything else is unmarshaled to EpochTimeEncoder.
+func (e *TimeEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "rfc3339nano", "RFC3339Nano":
+		*e = RFC3339NanoTimeEncoder
+	case "rfc3339", "RFC3339":
+		*e = RFC3339TimeEncoder
+	case "iso8601", "ISO8601":
+		*e = ISO8601TimeEncoder
+	case "millis":
+		*e = EpochMillisTimeEncoder
+	case "nanos":
+		*e = EpochNanosTimeEncoder
+	default:
+		*e = EpochTimeEncoder
+	}
+	return nil
+}
+
+// UnmarshalYAML unmarshals YAML to a TimeEncoder.
+// If the value is an object with a "layout" field, it will be unmarshaled to a TimeEncoder with the given layout.
+//
+//	timeEncoder:
+//		layout: 06/01/02 03:04pm
+//
+// If the value is a string, it uses UnmarshalText.
+//
+//	timeEncoder: iso8601
+func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var o struct {
+		Layout string `json:"layout" yaml:"layout"`
+	}
+	if err := unmarshal(&o); err == nil {
+		*e = TimeEncoderOfLayout(o.Layout)
+		return nil
+	}
+
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	return e.UnmarshalText([]byte(s))
+}
+
+// UnmarshalJSON unmarshals JSON to a TimeEncoder in the same way that
+// UnmarshalYAML does.
+func (e *TimeEncoder) UnmarshalJSON(data []byte) error {
+	return e.UnmarshalYAML(func(v interface{}) error {
+		return json.Unmarshal(data, v)
+	})
+}
+
+// A DurationEncoder serializes a time.Duration to a primitive type.
+type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
+
+// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
+func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendFloat64(float64(d) / float64(time.Second))
+}
+
+// NanosDurationEncoder serializes a time.Duration to an integer number of
+// nanoseconds elapsed.
+func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendInt64(int64(d))
+}
+
+// MillisDurationEncoder serializes a time.Duration to an integer number of
+// milliseconds elapsed.
+func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendInt64(d.Nanoseconds() / 1e6)
+}
+
+// StringDurationEncoder serializes a time.Duration using its built-in String
+// method.
+func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendString(d.String())
+}
+
+// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled
+// to StringDurationEncoder, "nanos" to NanosDurationEncoder, "ms" to
+// MillisDurationEncoder, and anything else to SecondsDurationEncoder.
+func (e *DurationEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "string":
+		*e = StringDurationEncoder
+	case "nanos":
+		*e = NanosDurationEncoder
+	case "ms":
+		*e = MillisDurationEncoder
+	default:
+		*e = SecondsDurationEncoder
+	}
+	return nil
+}
+
+// A CallerEncoder serializes an EntryCaller to a primitive type.
+type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
+
+// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
+// format.
+func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+	// TODO: consider using a byte-oriented API to save an allocation.
+	enc.AppendString(caller.String())
+}
+
+// ShortCallerEncoder serializes a caller in package/file:line format, trimming
+// all but the final directory from the full path.
+func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+	// TODO: consider using a byte-oriented API to save an allocation.
+	enc.AppendString(caller.TrimmedPath())
+}
+
+// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to
+// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder.
+func (e *CallerEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "full":
+		*e = FullCallerEncoder
+	default:
+		*e = ShortCallerEncoder
+	}
+	return nil
+}
+
+// A NameEncoder serializes a period-separated logger name to a primitive
+// type.
+type NameEncoder func(string, PrimitiveArrayEncoder)
+
+// FullNameEncoder serializes the logger name as-is.
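These encoder hooks all converge in the EncoderConfig struct defined after this note. Both wiring styles are sketched here; the JSON keys mirror the struct tags below, and the concrete keys and layouts are illustrative:

package main

import (
	"encoding/json"

	"go.uber.org/zap/zapcore"
)

func main() {
	// Programmatic: assign encoder funcs directly.
	cfg := zapcore.EncoderConfig{
		MessageKey:     "msg",
		LevelKey:       "level",
		TimeKey:        "ts",
		EncodeLevel:    zapcore.CapitalLevelEncoder,
		EncodeTime:     zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05"),
		EncodeDuration: zapcore.StringDurationEncoder,
		EncodeCaller:   zapcore.ShortCallerEncoder,
	}

	// Text-driven: the UnmarshalText/UnmarshalJSON hooks above let the same
	// choices come from a config file.
	raw := []byte(`{"levelEncoder": "capital", "timeEncoder": {"layout": "06/01/02 03:04pm"}}`)
	var fromJSON zapcore.EncoderConfig
	if err := json.Unmarshal(raw, &fromJSON); err != nil {
		panic(err)
	}

	// Either config is ready to hand to an encoder constructor.
	_ = zapcore.NewJSONEncoder(cfg)
	_ = zapcore.NewConsoleEncoder(fromJSON)
}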
+func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { + enc.AppendString(loggerName) +} + +// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is +// unmarshaled to FullNameEncoder. +func (e *NameEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullNameEncoder + default: + *e = FullNameEncoder + } + return nil +} + +// An EncoderConfig allows users to configure the concrete encoders supplied by +// zapcore. +type EncoderConfig struct { + // Set the keys used for each log entry. If any key is empty, that portion + // of the entry is omitted. + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + FunctionKey string `json:"functionKey" yaml:"functionKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` + // Configure the primitive representations of common complex types. For + // example, some users may want all time.Times serialized as floating-point + // seconds since epoch, while others may prefer ISO8601 strings. + EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` + EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` + EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` + EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` + // Unlike the other primitive type encoders, EncodeName is optional. The + // zero value falls back to FullNameEncoder. + EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` + // Configure the encoder for interface{} type objects. + // If not provided, objects are encoded using json.Encoder + NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"` + // Configures the field separator used by the console encoder. Defaults + // to tab. + ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"` +} + +// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a +// map- or struct-like object to the logging context. Like maps, ObjectEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ObjectEncoder interface { + // Logging-specific marshalers. + AddArray(key string, marshaler ArrayMarshaler) error + AddObject(key string, marshaler ObjectMarshaler) error + + // Built-in types. 
+ AddBinary(key string, value []byte) // for arbitrary bytes + AddByteString(key string, value []byte) // for UTF-8 encoded bytes + AddBool(key string, value bool) + AddComplex128(key string, value complex128) + AddComplex64(key string, value complex64) + AddDuration(key string, value time.Duration) + AddFloat64(key string, value float64) + AddFloat32(key string, value float32) + AddInt(key string, value int) + AddInt64(key string, value int64) + AddInt32(key string, value int32) + AddInt16(key string, value int16) + AddInt8(key string, value int8) + AddString(key, value string) + AddTime(key string, value time.Time) + AddUint(key string, value uint) + AddUint64(key string, value uint64) + AddUint32(key string, value uint32) + AddUint16(key string, value uint16) + AddUint8(key string, value uint8) + AddUintptr(key string, value uintptr) + + // AddReflected uses reflection to serialize arbitrary objects, so it can be + // slow and allocation-heavy. + AddReflected(key string, value interface{}) error + // OpenNamespace opens an isolated namespace where all subsequent fields will + // be added. Applications can use namespaces to prevent key collisions when + // injecting loggers into sub-components or third-party libraries. + OpenNamespace(key string) +} + +// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding +// array-like objects to the logging context. Of note, it supports mixed-type +// arrays even though they aren't typical in Go. Like slices, ArrayEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ArrayEncoder interface { + // Built-in types. + PrimitiveArrayEncoder + + // Time-related types. + AppendDuration(time.Duration) + AppendTime(time.Time) + + // Logging-specific marshalers. + AppendArray(ArrayMarshaler) error + AppendObject(ObjectMarshaler) error + + // AppendReflected uses reflection to serialize arbitrary objects, so it's + // slow and allocation-heavy. + AppendReflected(value interface{}) error +} + +// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals +// only in Go's built-in types. It's included only so that Duration- and +// TimeEncoders cannot trigger infinite recursion. +type PrimitiveArrayEncoder interface { + // Built-in types. + AppendBool(bool) + AppendByteString([]byte) // for UTF-8 encoded bytes + AppendComplex128(complex128) + AppendComplex64(complex64) + AppendFloat64(float64) + AppendFloat32(float32) + AppendInt(int) + AppendInt64(int64) + AppendInt32(int32) + AppendInt16(int16) + AppendInt8(int8) + AppendString(string) + AppendUint(uint) + AppendUint64(uint64) + AppendUint32(uint32) + AppendUint16(uint16) + AppendUint8(uint8) + AppendUintptr(uintptr) +} + +// Encoder is a format-agnostic interface for all log entry marshalers. Since +// log encoders don't need to support the same wide range of use cases as +// general-purpose marshalers, it's possible to make them faster and +// lower-allocation. +// +// Implementations of the ObjectEncoder interface's methods can, of course, +// freely modify the receiver. However, the Clone and EncodeEntry methods will +// be called concurrently and shouldn't modify the receiver. +type Encoder interface { + ObjectEncoder + + // Clone copies the encoder, ensuring that adding fields to the copy doesn't + // affect the original. + Clone() Encoder + + // EncodeEntry encodes an entry and fields, along with any accumulated + // context, into a byte buffer and returns it. 
Any fields that are empty, + // including fields on the `Entry` type, should be omitted. + EncodeEntry(Entry, []Field) (*buffer.Buffer, error) +} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go new file mode 100644 index 0000000000..459a5d7ce3 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -0,0 +1,298 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "runtime" + "strings" + "time" + + "go.uber.org/multierr" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/exit" + "go.uber.org/zap/internal/pool" +) + +var _cePool = pool.New(func() *CheckedEntry { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } +}) + +func getCheckedEntry() *CheckedEntry { + ce := _cePool.Get() + ce.reset() + return ce +} + +func putCheckedEntry(ce *CheckedEntry) { + if ce == nil { + return + } + _cePool.Put(ce) +} + +// NewEntryCaller makes an EntryCaller from the return signature of +// runtime.Caller. +func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { + if !ok { + return EntryCaller{} + } + return EntryCaller{ + PC: pc, + File: file, + Line: line, + Defined: true, + } +} + +// EntryCaller represents the caller of a logging function. +type EntryCaller struct { + Defined bool + PC uintptr + File string + Line int + Function string +} + +// String returns the full path and line number of the caller. +func (ec EntryCaller) String() string { + return ec.FullPath() +} + +// FullPath returns a /full/path/to/package/file:line description of the +// caller. +func (ec EntryCaller) FullPath() string { + if !ec.Defined { + return "undefined" + } + buf := bufferpool.Get() + buf.AppendString(ec.File) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// TrimmedPath returns a package/file:line description of the caller, +// preserving only the leaf directory name and file name. +func (ec EntryCaller) TrimmedPath() string { + if !ec.Defined { + return "undefined" + } + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. 
+ // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + // Find the last separator. + // + idx := strings.LastIndexByte(ec.File, '/') + if idx == -1 { + return ec.FullPath() + } + // Find the penultimate separator. + idx = strings.LastIndexByte(ec.File[:idx], '/') + if idx == -1 { + return ec.FullPath() + } + buf := bufferpool.Get() + // Keep everything after the penultimate separator. + buf.AppendString(ec.File[idx+1:]) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// An Entry represents a complete log message. The entry's structured context +// is already serialized, but the log level, time, message, and call site +// information are available for inspection and modification. Any fields left +// empty will be omitted when encoding. +// +// Entries are pooled, so any functions that accept them MUST be careful not to +// retain references to them. +type Entry struct { + Level Level + Time time.Time + LoggerName string + Message string + Caller EntryCaller + Stack string +} + +// CheckWriteHook is a custom action that may be executed after an entry is +// written. +// +// Register one on a CheckedEntry with the After method. +// +// if ce := logger.Check(...); ce != nil { +// ce = ce.After(hook) +// ce.Write(...) +// } +// +// You can configure the hook for Fatal log statements at the logger level with +// the zap.WithFatalHook option. +type CheckWriteHook interface { + // OnWrite is invoked with the CheckedEntry that was written and a list + // of fields added with that entry. + // + // The list of fields DOES NOT include fields that were already added + // to the logger with the With method. + OnWrite(*CheckedEntry, []Field) +} + +// CheckWriteAction indicates what action to take after a log entry is +// processed. Actions are ordered in increasing severity. +type CheckWriteAction uint8 + +const ( + // WriteThenNoop indicates that nothing special needs to be done. It's the + // default behavior. + WriteThenNoop CheckWriteAction = iota + // WriteThenGoexit runs runtime.Goexit after Write. + WriteThenGoexit + // WriteThenPanic causes a panic after Write. + WriteThenPanic + // WriteThenFatal causes an os.Exit(1) after Write. + WriteThenFatal +) + +// OnWrite implements the OnWrite method to keep CheckWriteAction compatible +// with the new CheckWriteHook interface which deprecates CheckWriteAction. +func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) { + switch a { + case WriteThenGoexit: + runtime.Goexit() + case WriteThenPanic: + panic(ce.Message) + case WriteThenFatal: + exit.With(1) + } +} + +var _ CheckWriteHook = CheckWriteAction(0) + +// CheckedEntry is an Entry together with a collection of Cores that have +// already agreed to log it. +// +// CheckedEntry references should be created by calling AddCore or After on a +// nil *CheckedEntry. References are returned to a pool after Write, and MUST +// NOT be retained after calling their Write method. 
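Given the pooling rules just stated, the public Check-then-Write idiom looks like this (a sketch; the expensive-field helper is hypothetical):

package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	// Check returns a pooled *CheckedEntry only if some Core wants the entry;
	// Write both logs it and returns the entry to the pool, so the reference
	// must not be retained or written twice.
	if ce := logger.Check(zap.DebugLevel, "expensive debug detail"); ce != nil {
		ce.Write(zap.Int("queue_depth", computeQueueDepth()))
	}
}

// computeQueueDepth stands in for work that's only worth doing when the
// entry will actually be logged.
func computeQueueDepth() int { return 42 }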
+type CheckedEntry struct { + Entry + ErrorOutput WriteSyncer + dirty bool // best-effort detection of pool misuse + after CheckWriteHook + cores []Core +} + +func (ce *CheckedEntry) reset() { + ce.Entry = Entry{} + ce.ErrorOutput = nil + ce.dirty = false + ce.after = nil + for i := range ce.cores { + // don't keep references to cores + ce.cores[i] = nil + } + ce.cores = ce.cores[:0] +} + +// Write writes the entry to the stored Cores, returns any errors, and returns +// the CheckedEntry reference to a pool for immediate re-use. Finally, it +// executes any required CheckWriteAction. +func (ce *CheckedEntry) Write(fields ...Field) { + if ce == nil { + return + } + + if ce.dirty { + if ce.ErrorOutput != nil { + // Make a best effort to detect unsafe re-use of this CheckedEntry. + // If the entry is dirty, log an internal error; because the + // CheckedEntry is being used after it was returned to the pool, + // the message may be an amalgamation from multiple call sites. + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) + _ = ce.ErrorOutput.Sync() // ignore error + } + return + } + ce.dirty = true + + var err error + for i := range ce.cores { + err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) + } + if err != nil && ce.ErrorOutput != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + _ = ce.ErrorOutput.Sync() // ignore error + } + + hook := ce.after + if hook != nil { + hook.OnWrite(ce, fields) + } + putCheckedEntry(ce) +} + +// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be +// used by Core.Check implementations, and is safe to call on nil CheckedEntry +// references. +func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.cores = append(ce.cores, core) + return ce +} + +// Should sets this CheckedEntry's CheckWriteAction, which controls whether a +// Core will panic or fatal after writing this log entry. Like AddCore, it's +// safe to call on nil CheckedEntry references. +// +// Deprecated: Use [CheckedEntry.After] instead. +func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + return ce.After(ent, should) +} + +// After sets this CheckEntry's CheckWriteHook, which will be called after this +// log entry has been written. It's safe to call this on nil CheckedEntry +// references. +func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.after = hook + return ce +} diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go new file mode 100644 index 0000000000..c40df13269 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -0,0 +1,136 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"fmt"
+	"reflect"
+
+	"go.uber.org/zap/internal/pool"
+)
+
+// Encodes the given error into fields of an object. A field with the given
+// name is added for the error message.
+//
+// If the error implements fmt.Formatter, a field with the name ${key}Verbose
+// is also added with the full verbose error message.
+//
+// Finally, if the error implements errorGroup (from go.uber.org/multierr) or
+// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
+// array of objects containing the errors this error is composed of.
+//
+//	{
+//		"error": err.Error(),
+//		"errorVerbose": fmt.Sprintf("%+v", err),
+//		"errorCauses": [
+//			...
+//		],
+//	}
+func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
+	// Try to capture panics (from nil references or otherwise) when calling
+	// the Error() method.
+	defer func() {
+		if rerr := recover(); rerr != nil {
+			// If it's a nil pointer, just say "<nil>". The likeliest causes are an
+			// error that fails to guard against nil or a nil pointer for a
+			// value receiver, and in either case, "<nil>" is a nice result.
+			if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
+				enc.AddString(key, "<nil>")
+				return
+			}
+
+			retErr = fmt.Errorf("PANIC=%v", rerr)
+		}
+	}()
+
+	basic := err.Error()
+	enc.AddString(key, basic)
+
+	switch e := err.(type) {
+	case errorGroup:
+		return enc.AddArray(key+"Causes", errArray(e.Errors()))
+	case fmt.Formatter:
+		verbose := fmt.Sprintf("%+v", e)
+		if verbose != basic {
+			// This is a rich error type, like those produced by
+			// github.com/pkg/errors.
+			enc.AddString(key+"Verbose", verbose)
+		}
+	}
+	return nil
+}
+
+type errorGroup interface {
+	// Provides read-only access to the underlying list of errors, preferably
+	// without causing any allocs.
+	Errors() []error
+}
+
+// Note that errArray and errArrayElem are very similar to the version
+// implemented in the top-level error.go file. We can't re-use this because
+// that would require exporting errArray as part of the zapcore API.
+
+// Encodes a list of errors using the standard error encoding logic.
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
+	for i := range errs {
+		if errs[i] == nil {
+			continue
+		}
+
+		el := newErrArrayElem(errs[i])
+		err := arr.AppendObject(el)
+		el.Free()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+var _errArrayElemPool = pool.New(func() *errArrayElem {
+	return &errArrayElem{}
+})
+
+// Encodes any error into a {"error": ...} object, re-using the same
+// error-encoding logic.
+//
+// May be passed in place of an array to build a single-element array.
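Since go.uber.org/multierr's combined errors satisfy the errorGroup interface above, the fan-out described in the doc comment can be exercised end to end. A sketch; the exact output shape follows the ${key}Causes convention documented above but is not verified byte-for-byte here:

package main

import (
	"errors"

	"go.uber.org/multierr"
	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	err := multierr.Combine(
		errors.New("connect timeout"),
		errors.New("dns lookup failed"),
	)

	// encodeError adds "error" and, because the combined error exposes
	// Errors() []error, an "errorCauses" array with one object per cause.
	logger.Error("startup failed", zap.Error(err))
}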
+type errArrayElem struct{ err error }
+
+func newErrArrayElem(err error) *errArrayElem {
+	e := _errArrayElemPool.Get()
+	e.err = err
+	return e
+}
+
+func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error {
+	return arr.AppendObject(e)
+}
+
+func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error {
+	return encodeError("error", e.err, enc)
+}
+
+func (e *errArrayElem) Free() {
+	e.err = nil
+	_errArrayElemPool.Put(e)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
new file mode 100644
index 0000000000..95bdb0a126
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/field.go
@@ -0,0 +1,233 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"reflect"
+	"time"
+)
+
+// A FieldType indicates which member of the Field union struct should be used
+// and how it should be serialized.
+type FieldType uint8
+
+const (
+	// UnknownType is the default field type. Attempting to add it to an encoder will panic.
+	UnknownType FieldType = iota
+	// ArrayMarshalerType indicates that the field carries an ArrayMarshaler.
+	ArrayMarshalerType
+	// ObjectMarshalerType indicates that the field carries an ObjectMarshaler.
+	ObjectMarshalerType
+	// BinaryType indicates that the field carries an opaque binary blob.
+	BinaryType
+	// BoolType indicates that the field carries a bool.
+	BoolType
+	// ByteStringType indicates that the field carries UTF-8 encoded bytes.
+	ByteStringType
+	// Complex128Type indicates that the field carries a complex128.
+	Complex128Type
+	// Complex64Type indicates that the field carries a complex64.
+	Complex64Type
+	// DurationType indicates that the field carries a time.Duration.
+	DurationType
+	// Float64Type indicates that the field carries a float64.
+	Float64Type
+	// Float32Type indicates that the field carries a float32.
+	Float32Type
+	// Int64Type indicates that the field carries an int64.
+	Int64Type
+	// Int32Type indicates that the field carries an int32.
+	Int32Type
+	// Int16Type indicates that the field carries an int16.
+	Int16Type
+	// Int8Type indicates that the field carries an int8.
+	Int8Type
+	// StringType indicates that the field carries a string.
+	StringType
+	// TimeType indicates that the field carries a time.Time that is
+	// representable by a UnixNano() stored as an int64.
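+	// The Field's Integer holds the UnixNano value and, when the Field's
+	// Interface is non-nil, it holds the *time.Location to render the time
+	// in (see Field.AddTo below).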
+ TimeType + // TimeFullType indicates that the field carries a time.Time stored as-is. + TimeFullType + // Uint64Type indicates that the field carries a uint64. + Uint64Type + // Uint32Type indicates that the field carries a uint32. + Uint32Type + // Uint16Type indicates that the field carries a uint16. + Uint16Type + // Uint8Type indicates that the field carries a uint8. + Uint8Type + // UintptrType indicates that the field carries a uintptr. + UintptrType + // ReflectType indicates that the field carries an interface{}, which should + // be serialized using reflection. + ReflectType + // NamespaceType signals the beginning of an isolated namespace. All + // subsequent fields should be added to the new namespace. + NamespaceType + // StringerType indicates that the field carries a fmt.Stringer. + StringerType + // ErrorType indicates that the field carries an error. + ErrorType + // SkipType indicates that the field is a no-op. + SkipType + + // InlineMarshalerType indicates that the field carries an ObjectMarshaler + // that should be inlined. + InlineMarshalerType +) + +// A Field is a marshaling operation used to add a key-value pair to a logger's +// context. Most fields are lazily marshaled, so it's inexpensive to add fields +// to disabled debug-level log statements. +type Field struct { + Key string + Type FieldType + Integer int64 + String string + Interface interface{} +} + +// AddTo exports a field through the ObjectEncoder interface. It's primarily +// useful to library authors, and shouldn't be necessary in most applications. +func (f Field) AddTo(enc ObjectEncoder) { + var err error + + switch f.Type { + case ArrayMarshalerType: + err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) + case ObjectMarshalerType: + err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case InlineMarshalerType: + err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc) + case BinaryType: + enc.AddBinary(f.Key, f.Interface.([]byte)) + case BoolType: + enc.AddBool(f.Key, f.Integer == 1) + case ByteStringType: + enc.AddByteString(f.Key, f.Interface.([]byte)) + case Complex128Type: + enc.AddComplex128(f.Key, f.Interface.(complex128)) + case Complex64Type: + enc.AddComplex64(f.Key, f.Interface.(complex64)) + case DurationType: + enc.AddDuration(f.Key, time.Duration(f.Integer)) + case Float64Type: + enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) + case Float32Type: + enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) + case Int64Type: + enc.AddInt64(f.Key, f.Integer) + case Int32Type: + enc.AddInt32(f.Key, int32(f.Integer)) + case Int16Type: + enc.AddInt16(f.Key, int16(f.Integer)) + case Int8Type: + enc.AddInt8(f.Key, int8(f.Integer)) + case StringType: + enc.AddString(f.Key, f.String) + case TimeType: + if f.Interface != nil { + enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) + } else { + // Fall back to UTC if location is nil. 
+			enc.AddTime(f.Key, time.Unix(0, f.Integer))
+		}
+	case TimeFullType:
+		enc.AddTime(f.Key, f.Interface.(time.Time))
+	case Uint64Type:
+		enc.AddUint64(f.Key, uint64(f.Integer))
+	case Uint32Type:
+		enc.AddUint32(f.Key, uint32(f.Integer))
+	case Uint16Type:
+		enc.AddUint16(f.Key, uint16(f.Integer))
+	case Uint8Type:
+		enc.AddUint8(f.Key, uint8(f.Integer))
+	case UintptrType:
+		enc.AddUintptr(f.Key, uintptr(f.Integer))
+	case ReflectType:
+		err = enc.AddReflected(f.Key, f.Interface)
+	case NamespaceType:
+		enc.OpenNamespace(f.Key)
+	case StringerType:
+		err = encodeStringer(f.Key, f.Interface, enc)
+	case ErrorType:
+		err = encodeError(f.Key, f.Interface.(error), enc)
+	case SkipType:
+		break
+	default:
+		panic(fmt.Sprintf("unknown field type: %v", f))
+	}
+
+	if err != nil {
+		enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error())
+	}
+}
+
+// Equals returns whether two fields are equal. For non-primitive types such as
+// errors, marshalers, or reflect types, it uses reflect.DeepEqual.
+func (f Field) Equals(other Field) bool {
+	if f.Type != other.Type {
+		return false
+	}
+	if f.Key != other.Key {
+		return false
+	}
+
+	switch f.Type {
+	case BinaryType, ByteStringType:
+		return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte))
+	case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType:
+		return reflect.DeepEqual(f.Interface, other.Interface)
+	default:
+		return f == other
+	}
+}
+
+func addFields(enc ObjectEncoder, fields []Field) {
+	for i := range fields {
+		fields[i].AddTo(enc)
+	}
+}
+
+func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) {
+	// Try to capture panics (from nil references or otherwise) when calling
+	// the String() method, similar to https://golang.org/src/fmt/print.go#L540
+	defer func() {
+		if err := recover(); err != nil {
+			// If it's a nil pointer, just say "<nil>". The likeliest causes are a
+			// Stringer that fails to guard against nil or a nil pointer for a
+			// value receiver, and in either case, "<nil>" is a nice result.
+			if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() {
+				enc.AddString(key, "<nil>")
+				return
+			}
+
+			retErr = fmt.Errorf("PANIC=%v", err)
+		}
+	}()
+
+	enc.AddString(key, stringer.(fmt.Stringer).String())
+	return nil
+}
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
new file mode 100644
index 0000000000..198def9917
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/multierr"
+
+type hooked struct {
+	Core
+	funcs []func(Entry) error
+}
+
+var (
+	_ Core           = (*hooked)(nil)
+	_ leveledEnabler = (*hooked)(nil)
+)
+
+// RegisterHooks wraps a Core and runs a collection of user-defined callback
+// hooks each time a message is logged. Execution of the callbacks is blocking.
+//
+// This offers users an easy way to register simple callbacks (e.g., metrics
+// collection) without implementing the full Core interface.
+func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
+	funcs := append([]func(Entry) error{}, hooks...)
+	return &hooked{
+		Core:  core,
+		funcs: funcs,
+	}
+}
+
+func (h *hooked) Level() Level {
+	return LevelOf(h.Core)
+}
+
+func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+	// Let the wrapped Core decide whether to log this message or not. This
+	// also gives the downstream a chance to register itself directly with the
+	// CheckedEntry.
+	if downstream := h.Core.Check(ent, ce); downstream != nil {
+		return downstream.AddCore(ent, h)
+	}
+	return ce
+}
+
+func (h *hooked) With(fields []Field) Core {
+	return &hooked{
+		Core:  h.Core.With(fields),
+		funcs: h.funcs,
+	}
+}
+
+func (h *hooked) Write(ent Entry, _ []Field) error {
+	// Since our downstream had a chance to register itself directly with the
+	// CheckedEntry, we don't need to call it here.
+	var err error
+	for i := range h.funcs {
+		err = multierr.Append(err, h.funcs[i](ent))
+	}
+	return err
+}
diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go
new file mode 100644
index 0000000000..7a11237ae9
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/increase_level.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "fmt"
+
+type levelFilterCore struct {
+	core  Core
+	level LevelEnabler
+}
+
+var (
+	_ Core           = (*levelFilterCore)(nil)
+	_ leveledEnabler = (*levelFilterCore)(nil)
+)
+
+// NewIncreaseLevelCore creates a core that can be used to increase the level of
+// an existing Core.
It cannot be used to decrease the logging level, as it acts +// as a filter before calling the underlying core. If level decreases the log level, +// an error is returned. +func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) { + for l := _maxLevel; l >= _minLevel; l-- { + if !core.Enabled(l) && level.Enabled(l) { + return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l) + } + } + + return &levelFilterCore{core, level}, nil +} + +func (c *levelFilterCore) Enabled(lvl Level) bool { + return c.level.Enabled(lvl) +} + +func (c *levelFilterCore) Level() Level { + return LevelOf(c.level) +} + +func (c *levelFilterCore) With(fields []Field) Core { + return &levelFilterCore{c.core.With(fields), c.level} +} + +func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !c.Enabled(ent.Level) { + return ce + } + + return c.core.Check(ent, ce) +} + +func (c *levelFilterCore) Write(ent Entry, fields []Field) error { + return c.core.Write(ent, fields) +} + +func (c *levelFilterCore) Sync() error { + return c.core.Sync() +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go new file mode 100644 index 0000000000..c8ab86979b --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -0,0 +1,583 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/base64" + "math" + "time" + "unicode/utf8" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" +) + +// For JSON-escaping; see jsonEncoder.safeAddString below. +const _hex = "0123456789abcdef" + +var _jsonPool = pool.New(func() *jsonEncoder { + return &jsonEncoder{} +}) + +func putJSONEncoder(enc *jsonEncoder) { + if enc.reflectBuf != nil { + enc.reflectBuf.Free() + } + enc.EncoderConfig = nil + enc.buf = nil + enc.spaced = false + enc.openNamespaces = 0 + enc.reflectBuf = nil + enc.reflectEnc = nil + _jsonPool.Put(enc) +} + +type jsonEncoder struct { + *EncoderConfig + buf *buffer.Buffer + spaced bool // include spaces after colons and commas + openNamespaces int + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc ReflectedEncoder +} + +// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder +// appropriately escapes all field keys and values. 
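+//
+// A minimal construction, as a sketch (the keys and encoder functions chosen
+// here are illustrative, not required):
+//
+//	enc := NewJSONEncoder(EncoderConfig{
+//		MessageKey:  "msg",
+//		LevelKey:    "level",
+//		TimeKey:     "ts",
+//		EncodeLevel: LowercaseLevelEncoder,
+//		EncodeTime:  EpochTimeEncoder,
+//	})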
+// +// Note that the encoder doesn't deduplicate keys, so it's possible to produce +// a message like +// +// {"foo":"bar","foo":"baz"} +// +// This is permitted by the JSON specification, but not encouraged. Many +// libraries will ignore duplicate key-value pairs (typically keeping the last +// pair) when unmarshaling, but users should attempt to avoid adding duplicate +// keys. +func NewJSONEncoder(cfg EncoderConfig) Encoder { + return newJSONEncoder(cfg, false) +} + +func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + if cfg.SkipLineEnding { + cfg.LineEnding = "" + } else if cfg.LineEnding == "" { + cfg.LineEnding = DefaultLineEnding + } + + // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default + if cfg.NewReflectedEncoder == nil { + cfg.NewReflectedEncoder = defaultReflectedEncoder + } + + return &jsonEncoder{ + EncoderConfig: &cfg, + buf: bufferpool.Get(), + spaced: spaced, + } +} + +func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *jsonEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *jsonEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *jsonEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *jsonEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *jsonEncoder) AddComplex64(key string, val complex64) { + enc.addKey(key) + enc.AppendComplex64(val) +} + +func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *jsonEncoder) AddFloat64(key string, val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *jsonEncoder) AddFloat32(key string, val float32) { + enc.addKey(key) + enc.AppendFloat32(val) +} + +func (enc *jsonEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + enc.AppendInt64(val) +} + +func (enc *jsonEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = bufferpool.Get() + enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf) + } else { + enc.reflectBuf.Reset() + } +} + +var nullLiteralBytes = []byte("null") + +// Only invoke the standard JSON encoder if there is actually something to +// encode; otherwise write JSON null literal directly. 
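+// For instance, AddReflected("config", nil) appends the literal null without
+// touching the reflection buffer.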
+func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) { + if obj == nil { + return nullLiteralBytes, nil + } + enc.resetReflectBuf() + if err := enc.reflectEnc.Encode(obj); err != nil { + return nil, err + } + enc.reflectBuf.TrimNewline() + return enc.reflectBuf.Bytes(), nil +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + valueBytes, err := enc.encodeReflected(obj) + if err != nil { + return err + } + enc.addKey(key) + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) OpenNamespace(key string) { + enc.addKey(key) + enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *jsonEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *jsonEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *jsonEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('[') + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(']') + return err +} + +func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + // Close ONLY new openNamespaces that are created during + // AppendObject(). + old := enc.openNamespaces + enc.openNamespaces = 0 + enc.addElementSeparator() + enc.buf.AppendByte('{') + err := obj.MarshalLogObject(enc) + enc.buf.AppendByte('}') + enc.closeOpenNamespaces() + enc.openNamespaces = old + return err +} + +func (enc *jsonEncoder) AppendBool(val bool) { + enc.addElementSeparator() + enc.buf.AppendBool(val) +} + +func (enc *jsonEncoder) AppendByteString(val []byte) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddByteString(val) + enc.buf.AppendByte('"') +} + +// appendComplex appends the encoded form of the provided complex128 value. +// precision specifies the encoding precision for the real and imaginary +// components of the complex number. +func (enc *jsonEncoder) appendComplex(val complex128, precision int) { + enc.addElementSeparator() + // Cast to a platform-independent, fixed-size type. + r, i := float64(real(val)), float64(imag(val)) + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. + enc.buf.AppendFloat(r, precision) + // If imaginary part is less than 0, minus (-) sign is added by default + // by AppendFloat. + if i >= 0 { + enc.buf.AppendByte('+') + } + enc.buf.AppendFloat(i, precision) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + if e := enc.EncodeDuration; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. 
+ enc.AppendInt64(int64(val)) + } +} + +func (enc *jsonEncoder) AppendInt64(val int64) { + enc.addElementSeparator() + enc.buf.AppendInt(val) +} + +func (enc *jsonEncoder) AppendReflected(val interface{}) error { + valueBytes, err := enc.encodeReflected(val) + if err != nil { + return err + } + enc.addElementSeparator() + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) AppendString(val string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.buf.AppendTime(time, layout) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + if e := enc.EncodeTime; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. + enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *jsonEncoder) AppendUint64(val uint64) { + enc.addElementSeparator() + enc.buf.AppendUint(val) +} + +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) } +func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) } +func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *jsonEncoder) Clone() Encoder { + clone := enc.clone() + clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *jsonEncoder) clone() *jsonEncoder { + clone := _jsonPool.Get() + clone.EncoderConfig = enc.EncoderConfig + clone.spaced = enc.spaced + clone.openNamespaces = enc.openNamespaces + clone.buf = bufferpool.Get() + return clone +} + +func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + final := enc.clone() + final.buf.AppendByte('{') + + if final.LevelKey != "" && final.EncodeLevel != nil { + final.addKey(final.LevelKey) + 
cur := final.buf.Len() + final.EncodeLevel(ent.Level, final) + if cur == final.buf.Len() { + // User-supplied EncodeLevel was a no-op. Fall back to strings to keep + // output JSON valid. + final.AppendString(ent.Level.String()) + } + } + if final.TimeKey != "" { + final.AddTime(final.TimeKey, ent.Time) + } + if ent.LoggerName != "" && final.NameKey != "" { + final.addKey(final.NameKey) + cur := final.buf.Len() + nameEncoder := final.EncodeName + + // if no name encoder provided, fall back to FullNameEncoder for backwards + // compatibility + if nameEncoder == nil { + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, final) + if cur == final.buf.Len() { + // User-supplied EncodeName was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.LoggerName) + } + } + if ent.Caller.Defined { + if final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.FunctionKey != "" { + final.addKey(final.FunctionKey) + final.AppendString(ent.Caller.Function) + } + } + if final.MessageKey != "" { + final.addKey(enc.MessageKey) + final.AppendString(ent.Message) + } + if enc.buf.Len() > 0 { + final.addElementSeparator() + final.buf.Write(enc.buf.Bytes()) + } + addFields(final, fields) + final.closeOpenNamespaces() + if ent.Stack != "" && final.StacktraceKey != "" { + final.AddString(final.StacktraceKey, ent.Stack) + } + final.buf.AppendByte('}') + final.buf.AppendString(final.LineEnding) + + ret := final.buf + putJSONEncoder(final) + return ret, nil +} + +func (enc *jsonEncoder) truncate() { + enc.buf.Reset() +} + +func (enc *jsonEncoder) closeOpenNamespaces() { + for i := 0; i < enc.openNamespaces; i++ { + enc.buf.AppendByte('}') + } + enc.openNamespaces = 0 +} + +func (enc *jsonEncoder) addKey(key string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(key) + enc.buf.AppendByte('"') + enc.buf.AppendByte(':') + if enc.spaced { + enc.buf.AppendByte(' ') + } +} + +func (enc *jsonEncoder) addElementSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + switch enc.buf.Bytes()[last] { + case '{', '[', ':', ',', ' ': + return + default: + enc.buf.AppendByte(',') + if enc.spaced { + enc.buf.AppendByte(' ') + } + } +} + +func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { + enc.addElementSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +// safeAddString JSON-escapes a string and appends it to the internal buffer. +// Unlike the standard library's encoder, it doesn't attempt to protect the +// user from browser vulnerabilities or JSONP-related problems. +func (enc *jsonEncoder) safeAddString(s string) { + safeAppendStringLike( + (*buffer.Buffer).AppendString, + utf8.DecodeRuneInString, + enc.buf, + s, + ) +} + +// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. +func (enc *jsonEncoder) safeAddByteString(s []byte) { + safeAppendStringLike( + (*buffer.Buffer).AppendBytes, + utf8.DecodeRune, + enc.buf, + s, + ) +} + +// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString. 
+// It appends a string or byte slice to the buffer, escaping all special characters. +func safeAppendStringLike[S []byte | string]( + // appendTo appends this string-like object to the buffer. + appendTo func(*buffer.Buffer, S), + // decodeRune decodes the next rune from the string-like object + // and returns its value and width in bytes. + decodeRune func(S) (rune, int), + buf *buffer.Buffer, + s S, +) { + // The encoding logic below works by skipping over characters + // that can be safely copied as-is, + // until a character is found that needs special handling. + // At that point, we copy everything we've seen so far, + // and then handle that special character. + // + // last is the index of the last byte that was copied to the buffer. + last := 0 + for i := 0; i < len(s); { + if s[i] >= utf8.RuneSelf { + // Character >= RuneSelf may be part of a multi-byte rune. + // They need to be decoded before we can decide how to handle them. + r, size := decodeRune(s[i:]) + if r != utf8.RuneError || size != 1 { + // No special handling required. + // Skip over this rune and continue. + i += size + continue + } + + // Invalid UTF-8 sequence. + // Replace it with the Unicode replacement character. + appendTo(buf, s[last:i]) + buf.AppendString(`\ufffd`) + + i++ + last = i + } else { + // Character < RuneSelf is a single-byte UTF-8 rune. + if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' { + // No escaping necessary. + // Skip over this character and continue. + i++ + continue + } + + // This character needs to be escaped. + appendTo(buf, s[last:i]) + switch s[i] { + case '\\', '"': + buf.AppendByte('\\') + buf.AppendByte(s[i]) + case '\n': + buf.AppendByte('\\') + buf.AppendByte('n') + case '\r': + buf.AppendByte('\\') + buf.AppendByte('r') + case '\t': + buf.AppendByte('\\') + buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + buf.AppendString(`\u00`) + buf.AppendByte(_hex[s[i]>>4]) + buf.AppendByte(_hex[s[i]&0xF]) + } + + i++ + last = i + } + } + + // add remaining + appendTo(buf, s[last:]) +} diff --git a/vendor/go.uber.org/zap/zapcore/lazy_with.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go new file mode 100644 index 0000000000..05288d6a88 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go @@ -0,0 +1,54 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+
+package zapcore
+
+import "sync"
+
+type lazyWithCore struct {
+	Core
+	sync.Once
+	fields []Field
+}
+
+// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
+// the logger is written to (or is further chained in a non-lazy manner).
+func NewLazyWith(core Core, fields []Field) Core {
+	return &lazyWithCore{
+		Core:   core,
+		fields: fields,
+	}
+}
+
+func (d *lazyWithCore) initOnce() {
+	d.Once.Do(func() {
+		d.Core = d.Core.With(d.fields)
+	})
+}
+
+func (d *lazyWithCore) With(fields []Field) Core {
+	d.initOnce()
+	return d.Core.With(fields)
+}
+
+func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
+	d.initOnce()
+	return d.Core.Check(e, ce)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
new file mode 100644
index 0000000000..e01a241316
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -0,0 +1,229 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level")
+
+// A Level is a logging priority. Higher levels are more important.
+type Level int8
+
+const (
+	// DebugLevel logs are typically voluminous, and are usually disabled in
+	// production.
+	DebugLevel Level = iota - 1
+	// InfoLevel is the default logging priority.
+	InfoLevel
+	// WarnLevel logs are more important than Info, but don't need individual
+	// human review.
+	WarnLevel
+	// ErrorLevel logs are high-priority. If an application is running smoothly,
+	// it shouldn't generate any error-level logs.
+	ErrorLevel
+	// DPanicLevel logs are particularly important errors. In development the
+	// logger panics after writing the message.
+	DPanicLevel
+	// PanicLevel logs a message, then panics.
+	PanicLevel
+	// FatalLevel logs a message, then calls os.Exit(1).
+	FatalLevel
+
+	_minLevel = DebugLevel
+	_maxLevel = FatalLevel
+
+	// InvalidLevel is an invalid value for Level.
+	//
+	// Core implementations may panic if they see messages of this level.
+	InvalidLevel = _maxLevel + 1
+)
+
+// ParseLevel parses a level based on the lower-case or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
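+//
+// For example:
+//
+//	lvl, err := ParseLevel("INFO") // lvl == InfoLevel, err == nil
+//	_, err = ParseLevel("nosuch")  // err != nil: unrecognized level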
+func ParseLevel(text string) (Level, error) { + var level Level + err := level.UnmarshalText([]byte(text)) + return level, err +} + +type leveledEnabler interface { + LevelEnabler + + Level() Level +} + +// LevelOf reports the minimum enabled log level for the given LevelEnabler +// from Zap's supported log levels, or [InvalidLevel] if none of them are +// enabled. +// +// A LevelEnabler may implement a 'Level() Level' method to override the +// behavior of this function. +// +// func (c *core) Level() Level { +// return c.currentLevel +// } +// +// It is recommended that [Core] implementations that wrap other cores use +// LevelOf to retrieve the level of the wrapped core. For example, +// +// func (c *coreWrapper) Level() Level { +// return zapcore.LevelOf(c.wrappedCore) +// } +func LevelOf(enab LevelEnabler) Level { + if lvler, ok := enab.(leveledEnabler); ok { + return lvler.Level() + } + + for lvl := _minLevel; lvl <= _maxLevel; lvl++ { + if enab.Enabled(lvl) { + return lvl + } + } + + return InvalidLevel +} + +// String returns a lower-case ASCII representation of the log level. +func (l Level) String() string { + switch l { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warn" + case ErrorLevel: + return "error" + case DPanicLevel: + return "dpanic" + case PanicLevel: + return "panic" + case FatalLevel: + return "fatal" + default: + return fmt.Sprintf("Level(%d)", l) + } +} + +// CapitalString returns an all-caps ASCII representation of the log level. +func (l Level) CapitalString() string { + // Printing levels in all-caps is common enough that we should export this + // functionality. + switch l { + case DebugLevel: + return "DEBUG" + case InfoLevel: + return "INFO" + case WarnLevel: + return "WARN" + case ErrorLevel: + return "ERROR" + case DPanicLevel: + return "DPANIC" + case PanicLevel: + return "PANIC" + case FatalLevel: + return "FATAL" + default: + return fmt.Sprintf("LEVEL(%d)", l) + } +} + +// MarshalText marshals the Level to text. Note that the text representation +// drops the -Level suffix (see example). +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText +// expects the text representation of a Level to drop the -Level suffix (see +// example). +// +// In particular, this makes it easy to configure logging levels using YAML, +// TOML, or JSON files. +func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errUnmarshalNilLevel + } + if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { + return fmt.Errorf("unrecognized level: %q", text) + } + return nil +} + +func (l *Level) unmarshalText(text []byte) bool { + switch string(text) { + case "debug", "DEBUG": + *l = DebugLevel + case "info", "INFO", "": // make the zero value useful + *l = InfoLevel + case "warn", "WARN": + *l = WarnLevel + case "error", "ERROR": + *l = ErrorLevel + case "dpanic", "DPANIC": + *l = DPanicLevel + case "panic", "PANIC": + *l = PanicLevel + case "fatal", "FATAL": + *l = FatalLevel + default: + return false + } + return true +} + +// Set sets the level for the flag.Value interface. +func (l *Level) Set(s string) error { + return l.UnmarshalText([]byte(s)) +} + +// Get gets the level for the flag.Getter interface. +func (l *Level) Get() interface{} { + return *l +} + +// Enabled returns true if the given level is at or above this level. 
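+// For example, WarnLevel.Enabled(ErrorLevel) is true, while
+// WarnLevel.Enabled(DebugLevel) is false.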
+func (l Level) Enabled(lvl Level) bool { + return lvl >= l +} + +// LevelEnabler decides whether a given logging level is enabled when logging a +// message. +// +// Enablers are intended to be used to implement deterministic filters; +// concerns like sampling are better implemented as a Core. +// +// Each concrete Level value implements a static LevelEnabler which returns +// true for itself and all higher logging levels. For example WarnLevel.Enabled() +// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and +// FatalLevel, but return false for InfoLevel and DebugLevel. +type LevelEnabler interface { + Enabled(Level) bool +} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go new file mode 100644 index 0000000000..7af8dadcb3 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level_strings.go @@ -0,0 +1,46 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/zap/internal/color" + +var ( + _levelToColor = map[Level]color.Color{ + DebugLevel: color.Magenta, + InfoLevel: color.Blue, + WarnLevel: color.Yellow, + ErrorLevel: color.Red, + DPanicLevel: color.Red, + PanicLevel: color.Red, + FatalLevel: color.Red, + } + _unknownLevelColor = color.Red + + _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) + _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) +) + +func init() { + for level, color := range _levelToColor { + _levelToLowercaseColorString[level] = color.Add(level.String()) + _levelToCapitalColorString[level] = color.Add(level.CapitalString()) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go new file mode 100644 index 0000000000..c3c55ba0d9 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -0,0 +1,61 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// ObjectMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ObjectMarshaler is only used when zap.Object is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. +type ObjectMarshaler interface { + MarshalLogObject(ObjectEncoder) error +} + +// ObjectMarshalerFunc is a type adapter that turns a function into an +// ObjectMarshaler. +type ObjectMarshalerFunc func(ObjectEncoder) error + +// MarshalLogObject calls the underlying function. +func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { + return f(enc) +} + +// ArrayMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ArrayMarshaler is only used when zap.Array is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. +type ArrayMarshaler interface { + MarshalLogArray(ArrayEncoder) error +} + +// ArrayMarshalerFunc is a type adapter that turns a function into an +// ArrayMarshaler. +type ArrayMarshalerFunc func(ArrayEncoder) error + +// MarshalLogArray calls the underlying function. +func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { + return f(enc) +} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go new file mode 100644 index 0000000000..dfead0829d --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -0,0 +1,179 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// MapObjectEncoder is an ObjectEncoder backed by a simple +// map[string]interface{}. It's not fast enough for production use, but it's +// helpful in tests. +type MapObjectEncoder struct { + // Fields contains the entire encoded log context. + Fields map[string]interface{} + // cur is a pointer to the namespace we're currently writing to. + cur map[string]interface{} +} + +// NewMapObjectEncoder creates a new map-backed ObjectEncoder. +func NewMapObjectEncoder() *MapObjectEncoder { + m := make(map[string]interface{}) + return &MapObjectEncoder{ + Fields: m, + cur: m, + } +} + +// AddArray implements ObjectEncoder. +func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { + arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} + err := v.MarshalLogArray(arr) + m.cur[key] = arr.elems + return err +} + +// AddObject implements ObjectEncoder. +func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { + newMap := NewMapObjectEncoder() + m.cur[k] = newMap.Fields + return v.MarshalLogObject(newMap) +} + +// AddBinary implements ObjectEncoder. +func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } + +// AddByteString implements ObjectEncoder. +func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } + +// AddBool implements ObjectEncoder. +func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } + +// AddDuration implements ObjectEncoder. +func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } + +// AddComplex128 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } + +// AddComplex64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } + +// AddFloat64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } + +// AddFloat32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } + +// AddInt implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } + +// AddInt64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } + +// AddInt32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } + +// AddInt16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } + +// AddInt8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } + +// AddString implements ObjectEncoder. +func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } + +// AddTime implements ObjectEncoder. +func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } + +// AddUint implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } + +// AddUint64 implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } + +// AddUint32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } + +// AddUint16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } + +// AddUint8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } + +// AddUintptr implements ObjectEncoder. +func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } + +// AddReflected implements ObjectEncoder. +func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { + m.cur[k] = v + return nil +} + +// OpenNamespace implements ObjectEncoder. +func (m *MapObjectEncoder) OpenNamespace(k string) { + ns := make(map[string]interface{}) + m.cur[k] = ns + m.cur = ns +} + +// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like +// the MapObjectEncoder, it's not designed for production use. +type sliceArrayEncoder struct { + elems []interface{} +} + +func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { + enc := &sliceArrayEncoder{} + err := v.MarshalLogArray(enc) + s.elems = append(s.elems, enc.elems) + return err +} + +func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { + m := NewMapObjectEncoder() + err := v.MarshalLogObject(m) + s.elems = append(s.elems, m.Fields) + return err +} + +func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { + s.elems = append(s.elems, v) + return nil +} + +func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } +func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go new file mode 100644 index 0000000000..8746360eca --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go @@ -0,0 +1,41 @@ +// Copyright (c) 2016 
Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/json" + "io" +) + +// ReflectedEncoder serializes log fields that can't be serialized with Zap's +// JSON encoder. These have the ReflectType field type. +// Use EncoderConfig.NewReflectedEncoder to set this. +type ReflectedEncoder interface { + // Encode encodes and writes to the underlying data stream. + Encode(interface{}) error +} + +func defaultReflectedEncoder(w io.Writer) ReflectedEncoder { + enc := json.NewEncoder(w) + // For consistency with our custom JSON encoder. + enc.SetEscapeHTML(false) + return enc +} diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go new file mode 100644 index 0000000000..b7c093a4f2 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -0,0 +1,229 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import ( + "sync/atomic" + "time" +) + +const ( + _numLevels = _maxLevel - _minLevel + 1 + _countersPerLevel = 4096 +) + +type counter struct { + resetAt atomic.Int64 + counter atomic.Uint64 +} + +type counters [_numLevels][_countersPerLevel]counter + +func newCounters() *counters { + return &counters{} +} + +func (cs *counters) get(lvl Level, key string) *counter { + i := lvl - _minLevel + j := fnv32a(key) % _countersPerLevel + return &cs[i][j] +} + +// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc +func fnv32a(s string) uint32 { + const ( + offset32 = 2166136261 + prime32 = 16777619 + ) + hash := uint32(offset32) + for i := 0; i < len(s); i++ { + hash ^= uint32(s[i]) + hash *= prime32 + } + return hash +} + +func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { + tn := t.UnixNano() + resetAfter := c.resetAt.Load() + if resetAfter > tn { + return c.counter.Add(1) + } + + c.counter.Store(1) + + newResetAfter := tn + tick.Nanoseconds() + if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) { + // We raced with another goroutine trying to reset, and it also reset + // the counter to 1, so we need to reincrement the counter. + return c.counter.Add(1) + } + + return 1 +} + +// SamplingDecision is a decision represented as a bit field made by sampler. +// More decisions may be added in the future. +type SamplingDecision uint32 + +const ( + // LogDropped indicates that the Sampler dropped a log entry. + LogDropped SamplingDecision = 1 << iota + // LogSampled indicates that the Sampler sampled a log entry. + LogSampled +) + +// optionFunc wraps a func so it satisfies the SamplerOption interface. +type optionFunc func(*sampler) + +func (f optionFunc) apply(s *sampler) { + f(s) +} + +// SamplerOption configures a Sampler. +type SamplerOption interface { + apply(*sampler) +} + +// nopSamplingHook is the default hook used by sampler. +func nopSamplingHook(Entry, SamplingDecision) {} + +// SamplerHook registers a function which will be called when Sampler makes a +// decision. +// +// This hook may be used to get visibility into the performance of the sampler. +// For example, use it to track metrics of dropped versus sampled logs. +// +// var dropped atomic.Int64 +// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { +// if dec&zapcore.LogDropped > 0 { +// dropped.Inc() +// } +// }) +func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { + return optionFunc(func(s *sampler) { + s.hook = hook + }) +} + +// NewSamplerWithOptions creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// For example, +// +// core = NewSamplerWithOptions(core, time.Second, 10, 5) +// +// This will log the first 10 log entries with the same level and message +// in a one second interval as-is. Following that, it will allow through +// every 5th log entry with the same level and message in that interval. +// +// If thereafter is zero, the Core will drop all log entries after the first N +// in that interval. +// +// Sampler can be configured to report sampling decisions with the SamplerHook +// option. 
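+//
+// Concretely, with tick=time.Second, first=10, and thereafter=5, the first
+// ten entries with a given level and message in a one-second window pass
+// through, after which only entries 15, 20, 25, and so on do, until the
+// next tick resets the counter.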
+// +// Keep in mind that Zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { + s := &sampler{ + Core: core, + tick: tick, + counts: newCounters(), + first: uint64(first), + thereafter: uint64(thereafter), + hook: nopSamplingHook, + } + for _, opt := range opts { + opt.apply(s) + } + + return s +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 + hook func(Entry, SamplingDecision) +} + +var ( + _ Core = (*sampler)(nil) + _ leveledEnabler = (*sampler)(nil) +) + +// NewSampler creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +// +// Deprecated: use NewSamplerWithOptions. +func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return NewSamplerWithOptions(core, tick, first, thereafter) +} + +func (s *sampler) Level() Level { + return LevelOf(s.Core) +} + +func (s *sampler) With(fields []Field) Core { + return &sampler{ + Core: s.Core.With(fields), + tick: s.tick, + counts: s.counts, + first: s.first, + thereafter: s.thereafter, + hook: s.hook, + } +} + +func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !s.Enabled(ent.Level) { + return ce + } + + if ent.Level >= _minLevel && ent.Level <= _maxLevel { + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) { + s.hook(ent, LogDropped) + return ce + } + s.hook(ent, LogSampled) + } + return s.Core.Check(ent, ce) +} diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go new file mode 100644 index 0000000000..9bb32f0557 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/tee.go @@ -0,0 +1,96 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
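A usage sketch for the sampler above (illustrative, not vendored code; the encoder, sink, and rates are assumptions): allow the first 10 identical level-and-message pairs per second, then every 5th, counting drops through the hook:

package main

import (
	"os"
	"sync/atomic"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var dropped atomic.Int64
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stdout),
		zap.InfoLevel,
	)
	// Per one-second tick: the first 10 entries with the same level and
	// message pass through, then every 5th; the hook counts the rest.
	core := zapcore.NewSamplerWithOptions(base, time.Second, 10, 5,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				dropped.Add(1)
			}
		}))
	logger := zap.New(core)
	for i := 0; i < 100; i++ {
		logger.Info("hot path")
	}
	// A different message keys a different counter, so this line is logged.
	logger.Sugar().Infof("dropped %d entries", dropped.Load())
}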
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type multiCore []Core + +var ( + _ leveledEnabler = multiCore(nil) + _ Core = multiCore(nil) +) + +// NewTee creates a Core that duplicates log entries into two or more +// underlying Cores. +// +// Calling it with a single Core returns the input unchanged, and calling +// it with no input returns a no-op Core. +func NewTee(cores ...Core) Core { + switch len(cores) { + case 0: + return NewNopCore() + case 1: + return cores[0] + default: + return multiCore(cores) + } +} + +func (mc multiCore) With(fields []Field) Core { + clone := make(multiCore, len(mc)) + for i := range mc { + clone[i] = mc[i].With(fields) + } + return clone +} + +func (mc multiCore) Level() Level { + minLvl := _maxLevel // mc is never empty + for i := range mc { + if lvl := LevelOf(mc[i]); lvl < minLvl { + minLvl = lvl + } + } + return minLvl +} + +func (mc multiCore) Enabled(lvl Level) bool { + for i := range mc { + if mc[i].Enabled(lvl) { + return true + } + } + return false +} + +func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + for i := range mc { + ce = mc[i].Check(ent, ce) + } + return ce +} + +func (mc multiCore) Write(ent Entry, fields []Field) error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Write(ent, fields)) + } + return err +} + +func (mc multiCore) Sync() error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Sync()) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go new file mode 100644 index 0000000000..d4a1af3d07 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -0,0 +1,122 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "io" + "sync" + + "go.uber.org/multierr" +) + +// A WriteSyncer is an io.Writer that can also flush any buffered data. Note +// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. +type WriteSyncer interface { + io.Writer + Sync() error +} + +// AddSync converts an io.Writer to a WriteSyncer. 
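A usage sketch for the tee above (illustrative, not vendored code): duplicate entries to stdout at DebugLevel and stderr at ErrorLevel; Check fans each entry out to every child core that enables its level, so the error line below lands in both sinks:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	// The tee's Level() reports the most permissive child, so entries that
	// neither core enables are rejected cheaply up front.
	core := zapcore.NewTee(
		zapcore.NewCore(enc, zapcore.AddSync(os.Stdout), zap.DebugLevel),
		zapcore.NewCore(enc, zapcore.AddSync(os.Stderr), zap.ErrorLevel),
	)
	logger := zap.New(core)
	defer logger.Sync()
	logger.Debug("stdout only")
	logger.Error("stdout and stderr")
}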
It attempts to be +// intelligent: if the concrete type of the io.Writer implements WriteSyncer, +// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. +func AddSync(w io.Writer) WriteSyncer { + switch w := w.(type) { + case WriteSyncer: + return w + default: + return writerWrapper{w} + } +} + +type lockedWriteSyncer struct { + sync.Mutex + ws WriteSyncer +} + +// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In +// particular, *os.Files must be locked before use. +func Lock(ws WriteSyncer) WriteSyncer { + if _, ok := ws.(*lockedWriteSyncer); ok { + // no need to layer on another lock + return ws + } + return &lockedWriteSyncer{ws: ws} +} + +func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { + s.Lock() + n, err := s.ws.Write(bs) + s.Unlock() + return n, err +} + +func (s *lockedWriteSyncer) Sync() error { + s.Lock() + err := s.ws.Sync() + s.Unlock() + return err +} + +type writerWrapper struct { + io.Writer +} + +func (w writerWrapper) Sync() error { + return nil +} + +type multiWriteSyncer []WriteSyncer + +// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes +// and sync calls, much like io.MultiWriter. +func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { + if len(ws) == 1 { + return ws[0] + } + return multiWriteSyncer(ws) +} + +// See https://golang.org/src/io/multi.go +// When not all underlying syncers write the same number of bytes, +// the smallest number is returned even though Write() is called on +// all of them. +func (ws multiWriteSyncer) Write(p []byte) (int, error) { + var writeErr error + nWritten := 0 + for _, w := range ws { + n, err := w.Write(p) + writeErr = multierr.Append(writeErr, err) + if nWritten == 0 && n != 0 { + nWritten = n + } else if n < nWritten { + nWritten = n + } + } + return nWritten, writeErr +} + +func (ws multiWriteSyncer) Sync() error { + var err error + for _, w := range ws { + err = multierr.Append(err, w.Sync()) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go new file mode 100644 index 0000000000..6823773b72 --- /dev/null +++ b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go @@ -0,0 +1,245 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zapgrpc provides a logger that is compatible with grpclog. 
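The three helpers in this file are typically chained. A sketch (illustrative, not vendored code; the temp-file sink is an assumption):

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	f, err := os.CreateTemp("", "app-*.log")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// *os.File already has a Sync method, so AddSync keeps it; Lock makes
	// the shared stderr safe for concurrent use; the multi syncer fans both
	// writes and syncs out to every destination.
	ws := zapcore.NewMultiWriteSyncer(
		zapcore.Lock(zapcore.AddSync(os.Stderr)),
		zapcore.AddSync(f),
	)
	core := zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), ws, zap.InfoLevel)
	logger := zap.New(core)
	defer logger.Sync()
	logger.Info("written to stderr and the temp file")
}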
+package zapgrpc // import "go.uber.org/zap/zapgrpc" + +import ( + "fmt" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// See https://github.com/grpc/grpc-go/blob/v1.35.0/grpclog/loggerv2.go#L77-L86 +const ( + grpcLvlInfo int = iota + grpcLvlWarn + grpcLvlError + grpcLvlFatal +) + +var ( + // _grpcToZapLevel maps gRPC log levels to zap log levels. + // See https://pkg.go.dev/go.uber.org/zap@v1.16.0/zapcore#Level + _grpcToZapLevel = map[int]zapcore.Level{ + grpcLvlInfo: zapcore.InfoLevel, + grpcLvlWarn: zapcore.WarnLevel, + grpcLvlError: zapcore.ErrorLevel, + grpcLvlFatal: zapcore.FatalLevel, + } +) + +// An Option overrides a Logger's default configuration. +type Option interface { + apply(*Logger) +} + +type optionFunc func(*Logger) + +func (f optionFunc) apply(log *Logger) { + f(log) +} + +// WithDebug configures a Logger to print at zap's DebugLevel instead of +// InfoLevel. +// It only affects the Printf, Println and Print methods, which are only used in the gRPC v1 grpclog.Logger API. +// +// Deprecated: use grpclog.SetLoggerV2() for v2 API. +func WithDebug() Option { + return optionFunc(func(logger *Logger) { + logger.print = &printer{ + enab: logger.levelEnabler, + level: zapcore.DebugLevel, + print: logger.delegate.Debug, + printf: logger.delegate.Debugf, + } + }) +} + +// withWarn redirects the fatal level to the warn level, which makes testing +// easier. This is intentionally unexported. +func withWarn() Option { + return optionFunc(func(logger *Logger) { + logger.fatal = &printer{ + enab: logger.levelEnabler, + level: zapcore.WarnLevel, + print: logger.delegate.Warn, + printf: logger.delegate.Warnf, + } + }) +} + +// NewLogger returns a new Logger. +func NewLogger(l *zap.Logger, options ...Option) *Logger { + logger := &Logger{ + delegate: l.Sugar(), + levelEnabler: l.Core(), + } + logger.print = &printer{ + enab: logger.levelEnabler, + level: zapcore.InfoLevel, + print: logger.delegate.Info, + printf: logger.delegate.Infof, + } + logger.fatal = &printer{ + enab: logger.levelEnabler, + level: zapcore.FatalLevel, + print: logger.delegate.Fatal, + printf: logger.delegate.Fatalf, + } + for _, option := range options { + option.apply(logger) + } + return logger +} + +// printer implements Print, Printf, and Println operations for a Zap level. +// +// We use it to customize Debug vs Info, and Warn vs Fatal for Print and Fatal +// respectively. +type printer struct { + enab zapcore.LevelEnabler + level zapcore.Level + print func(...interface{}) + printf func(string, ...interface{}) +} + +func (v *printer) Print(args ...interface{}) { + v.print(args...) +} + +func (v *printer) Printf(format string, args ...interface{}) { + v.printf(format, args...) +} + +func (v *printer) Println(args ...interface{}) { + if v.enab.Enabled(v.level) { + v.print(sprintln(args)) + } +} + +// Logger adapts zap's Logger to be compatible with grpclog.LoggerV2 and the deprecated grpclog.Logger. +type Logger struct { + delegate *zap.SugaredLogger + levelEnabler zapcore.LevelEnabler + print *printer + fatal *printer + // printToDebug bool + // fatalToWarn bool +} + +// Print implements grpclog.Logger. +// +// Deprecated: use [Logger.Info]. +func (l *Logger) Print(args ...interface{}) { + l.print.Print(args...) +} + +// Printf implements grpclog.Logger. +// +// Deprecated: use [Logger.Infof]. +func (l *Logger) Printf(format string, args ...interface{}) { + l.print.Printf(format, args...) +} + +// Println implements grpclog.Logger. +// +// Deprecated: use [Logger.Info]. 
+func (l *Logger) Println(args ...interface{}) { + l.print.Println(args...) +} + +// Info implements grpclog.LoggerV2. +func (l *Logger) Info(args ...interface{}) { + l.delegate.Info(args...) +} + +// Infoln implements grpclog.LoggerV2. +func (l *Logger) Infoln(args ...interface{}) { + if l.levelEnabler.Enabled(zapcore.InfoLevel) { + l.delegate.Info(sprintln(args)) + } +} + +// Infof implements grpclog.LoggerV2. +func (l *Logger) Infof(format string, args ...interface{}) { + l.delegate.Infof(format, args...) +} + +// Warning implements grpclog.LoggerV2. +func (l *Logger) Warning(args ...interface{}) { + l.delegate.Warn(args...) +} + +// Warningln implements grpclog.LoggerV2. +func (l *Logger) Warningln(args ...interface{}) { + if l.levelEnabler.Enabled(zapcore.WarnLevel) { + l.delegate.Warn(sprintln(args)) + } +} + +// Warningf implements grpclog.LoggerV2. +func (l *Logger) Warningf(format string, args ...interface{}) { + l.delegate.Warnf(format, args...) +} + +// Error implements grpclog.LoggerV2. +func (l *Logger) Error(args ...interface{}) { + l.delegate.Error(args...) +} + +// Errorln implements grpclog.LoggerV2. +func (l *Logger) Errorln(args ...interface{}) { + if l.levelEnabler.Enabled(zapcore.ErrorLevel) { + l.delegate.Error(sprintln(args)) + } +} + +// Errorf implements grpclog.LoggerV2. +func (l *Logger) Errorf(format string, args ...interface{}) { + l.delegate.Errorf(format, args...) +} + +// Fatal implements grpclog.LoggerV2. +func (l *Logger) Fatal(args ...interface{}) { + l.fatal.Print(args...) +} + +// Fatalln implements grpclog.LoggerV2. +func (l *Logger) Fatalln(args ...interface{}) { + l.fatal.Println(args...) +} + +// Fatalf implements grpclog.LoggerV2. +func (l *Logger) Fatalf(format string, args ...interface{}) { + l.fatal.Printf(format, args...) +} + +// V implements grpclog.LoggerV2. +func (l *Logger) V(level int) bool { + return l.levelEnabler.Enabled(_grpcToZapLevel[level]) +} + +func sprintln(args []interface{}) string { + s := fmt.Sprintln(args...) + // Drop the new line character added by Sprintln + return s[:len(s)-1] +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index b330ccedc8..83c3829826 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -535,8 +535,8 @@ const minBatchSize = 1000 // size is too low to give stream goroutines a chance to fill it up. // // Upon exiting, if the error causing the exit is not an I/O error, run() -// flushes and closes the underlying connection. Otherwise, the connection is -// left open to allow the I/O error to be encountered by the reader instead. +// flushes the underlying connection. The connection is always left open to +// allow different closing behavior on the client and server. 
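Before the transport changes that follow, a usage note on the zapgrpc adapter above: it is installed process-wide through grpclog. A minimal sketch (illustrative, not vendored code):

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapgrpc"
	"google.golang.org/grpc/grpclog"
)

func main() {
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	// Logger satisfies grpclog.LoggerV2 (Info*/Warning*/Error*/Fatal*/V),
	// so gRPC-internal logs are routed through zap from here on.
	grpclog.SetLoggerV2(zapgrpc.NewLogger(logger))
}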
func (l *loopyWriter) run() (err error) { defer func() { if l.logger.V(logLevel) { @@ -544,7 +544,6 @@ func (l *loopyWriter) run() (err error) { } if !isIOError(err) { l.framer.writer.Flush() - l.conn.Close() } l.cbuf.finish() }() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index c33ac5961b..eff8799640 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -451,7 +451,13 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - t.loopy.run() + if err := t.loopy.run(); !isIOError(err) { + // Immediately close the connection, as the loopy writer returns + // when there are no more active streams and we were draining (the + // server sent a GOAWAY). For I/O errors, the reader will hit it + // after draining any remaining incoming data. + t.conn.Close() + } close(t.writerDone) }() return t, nil diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index f6bac0e8a0..a206e2eef7 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -322,8 +322,24 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - t.loopy.run() + err := t.loopy.run() close(t.loopyWriterDone) + if !isIOError(err) { + // Close the connection if a non-I/O error occurs (for I/O errors + // the reader will also encounter the error and close). Wait 1 + // second before closing the connection, or when the reader is done + // (i.e. the client already closed the connection or a connection + // error occurred). This avoids the potential problem where there + // is unread data on the receive side of the connection, which, if + // closed, would lead to a TCP RST instead of FIN, and the client + // encountering errors. For more info: + // https://github.com/grpc/grpc-go/issues/5358 + select { + case <-t.readerDone: + case <-time.After(time.Second): + } + t.conn.Close() + } }() go t.keepalive() return t, nil @@ -609,8 +625,8 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // traceCtx attaches trace to ctx and returns the new context. 
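The server-side shutdown hunk above encodes a general TCP rule: closing a socket that still has unread inbound data elicits an RST rather than a FIN. A standalone sketch of the same wait-for-reader-or-timeout idiom (names are illustrative; this is not gRPC API):

package sketch

import (
	"net"
	"time"
)

// closeAfterDrain closes conn once the reader goroutine signals completion,
// or after a one-second grace period, mirroring the hunk above. Closing with
// unread inbound data makes the kernel send a TCP RST instead of a FIN,
// which the peer then surfaces as a connection error rather than a clean EOF.
func closeAfterDrain(conn net.Conn, readerDone <-chan struct{}) error {
	select {
	case <-readerDone:
	case <-time.After(time.Second):
	}
	return conn.Close()
}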
func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
 	defer func() {
-		<-t.loopyWriterDone
 		close(t.readerDone)
+		<-t.loopyWriterDone
 	}()
 	for {
 		t.controlBuf.throttle()
@@ -1329,6 +1345,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
 		if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
 			return false, err
 		}
+		t.framer.writer.Flush()
 		if retErr != nil {
 			return false, retErr
 		}
@@ -1349,7 +1366,7 @@
 		return false, err
 	}
 	go func() {
-		timer := time.NewTimer(time.Minute)
+		timer := time.NewTimer(5 * time.Second)
 		defer timer.Stop()
 		select {
 		case <-t.drainEvent.Done():
diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go
new file mode 100644
index 0000000000..f2efa2a2cb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go
@@ -0,0 +1,126 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package manual defines a resolver that can be used to manually send resolved
+// addresses to ClientConn.
+package manual
+
+import (
+	"sync"
+
+	"google.golang.org/grpc/resolver"
+)
+
+// NewBuilderWithScheme creates a new manual resolver builder with the given
+// scheme. Every instance of the manual resolver may only ever be used with a
+// single grpc.ClientConn. Otherwise, bad things will happen.
+func NewBuilderWithScheme(scheme string) *Resolver {
+	return &Resolver{
+		BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {},
+		UpdateStateCallback: func(error) {},
+		ResolveNowCallback: func(resolver.ResolveNowOptions) {},
+		CloseCallback: func() {},
+		scheme: scheme,
+	}
+}
+
+// Resolver is also a resolver builder.
+// Its Build() function always returns itself.
+type Resolver struct {
+	// BuildCallback is called when the Build method is called. Must not be
+	// nil. Must not be changed after the resolver may be built.
+	BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions)
+	// UpdateStateCallback is called when the UpdateState method is called on
+	// the resolver. The value passed as argument to this callback is the value
+	// returned by the resolver.ClientConn. Must not be nil. Must not be
+	// changed after the resolver may be built.
+	UpdateStateCallback func(err error)
+	// ResolveNowCallback is called when the ResolveNow method is called on the
+	// resolver. Must not be nil. Must not be changed after the resolver may
+	// be built.
+	ResolveNowCallback func(resolver.ResolveNowOptions)
+	// CloseCallback is called when the Close method is called. Must not be
+	// nil. Must not be changed after the resolver may be built.
+	CloseCallback func()
+	scheme string
+
+	// Fields actually belong to the resolver.
+	// Guards access to the fields below.
+ mu sync.Mutex + CC resolver.ClientConn + // Storing the most recent state update makes this resolver resilient to + // restarts, which is possible with channel idleness. + lastSeenState *resolver.State +} + +// InitialState adds initial state to the resolver so that UpdateState doesn't +// need to be explicitly called after Dial. +func (r *Resolver) InitialState(s resolver.State) { + r.lastSeenState = &s +} + +// Build returns itself for Resolver, because it's both a builder and a resolver. +func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.BuildCallback(target, cc, opts) + r.mu.Lock() + defer r.mu.Unlock() + r.CC = cc + if r.lastSeenState != nil { + err := r.CC.UpdateState(*r.lastSeenState) + go r.UpdateStateCallback(err) + } + return r, nil +} + +// Scheme returns the manual resolver's scheme. +func (r *Resolver) Scheme() string { + return r.scheme +} + +// ResolveNow is a noop for Resolver. +func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) { + r.ResolveNowCallback(o) +} + +// Close is a noop for Resolver. +func (r *Resolver) Close() { + r.CloseCallback() +} + +// UpdateState calls CC.UpdateState. +func (r *Resolver) UpdateState(s resolver.State) { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.CC == nil { + panic("cannot update state as grpc.Dial with resolver has not been called") + } + err = r.CC.UpdateState(s) + r.lastSeenState = &s + r.UpdateStateCallback(err) +} + +// ReportError calls CC.ReportError. +func (r *Resolver) ReportError(err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.CC == nil { + panic("cannot report error as grpc.Dial with resolver has not been called") + } + r.CC.ReportError(err) +} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 1ad1ba2ad6..f1aec4c0ad 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.61.0" +const Version = "1.61.1" diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go new file mode 100644 index 0000000000..325781f5fd --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go @@ -0,0 +1,92 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
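A usage sketch for the manual resolver above (illustrative, not vendored code; the scheme and addresses are assumptions): seed initial state, register the builder per-dial, and push later updates with UpdateState:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	r := manual.NewBuilderWithScheme("example")
	// Seed state before Dial so UpdateState needn't be called afterwards.
	r.InitialState(resolver.State{Addresses: []resolver.Address{
		{Addr: "127.0.0.1:50051"},
		{Addr: "127.0.0.1:50052"},
	}})
	conn, err := grpc.Dial(
		r.Scheme()+":///ignored", // the endpoint is unused by this resolver
		grpc.WithResolvers(r),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// Later address changes are pushed with r.UpdateState(...).
}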
+ +package fake + +import ( + clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + fakeapiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + fakeapiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// ApiextensionsV1 retrieves the ApiextensionsV1Client +func (c *Clientset) ApiextensionsV1() apiextensionsv1.ApiextensionsV1Interface { + return &fakeapiextensionsv1.FakeApiextensionsV1{Fake: &c.Fake} +} + +// ApiextensionsV1beta1 retrieves the ApiextensionsV1beta1Client +func (c *Clientset) ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface { + return &fakeapiextensionsv1beta1.FakeApiextensionsV1beta1{Fake: &c.Fake} +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/doc.go new file mode 100644 index 0000000000..9b99e71670 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/register.go new file mode 100644 index 0000000000..355c699073 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/register.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + apiextensionsv1.AddToScheme, + apiextensionsv1beta1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/doc.go new file mode 100644 index 0000000000..16f4439906 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/fake_apiextensions_client.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/fake_apiextensions_client.go new file mode 100644 index 0000000000..43b4ee7a2f --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/fake_apiextensions_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeApiextensionsV1 struct { + *testing.Fake +} + +func (c *FakeApiextensionsV1) CustomResourceDefinitions() v1.CustomResourceDefinitionInterface { + return &FakeCustomResourceDefinitions{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeApiextensionsV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/fake_customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/fake_customresourcedefinition.go new file mode 100644 index 0000000000..9402e05697 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake/fake_customresourcedefinition.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCustomResourceDefinitions implements CustomResourceDefinitionInterface +type FakeCustomResourceDefinitions struct { + Fake *FakeApiextensionsV1 +} + +var customresourcedefinitionsResource = v1.SchemeGroupVersion.WithResource("customresourcedefinitions") + +var customresourcedefinitionsKind = v1.SchemeGroupVersion.WithKind("CustomResourceDefinition") + +// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. +func (c *FakeCustomResourceDefinitions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CustomResourceDefinition, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(customresourcedefinitionsResource, name), &v1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1.CustomResourceDefinition), err +} + +// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. +func (c *FakeCustomResourceDefinitions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CustomResourceDefinitionList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(customresourcedefinitionsResource, customresourcedefinitionsKind, opts), &v1.CustomResourceDefinitionList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.CustomResourceDefinitionList{ListMeta: obj.(*v1.CustomResourceDefinitionList).ListMeta} + for _, item := range obj.(*v1.CustomResourceDefinitionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested customResourceDefinitions. +func (c *FakeCustomResourceDefinitions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(customresourcedefinitionsResource, opts)) +} + +// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *FakeCustomResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (result *v1.CustomResourceDefinition, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(customresourcedefinitionsResource, customResourceDefinition), &v1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1.CustomResourceDefinition), err +} + +// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. 
+func (c *FakeCustomResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (result *v1.CustomResourceDefinition, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(customresourcedefinitionsResource, customResourceDefinition), &v1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1.CustomResourceDefinition), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCustomResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(customresourcedefinitionsResource, "status", customResourceDefinition), &v1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1.CustomResourceDefinition), err +} + +// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. +func (c *FakeCustomResourceDefinitions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(customresourcedefinitionsResource, name, opts), &v1.CustomResourceDefinition{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCustomResourceDefinitions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(customresourcedefinitionsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1.CustomResourceDefinitionList{}) + return err +} + +// Patch applies the patch and returns the patched customResourceDefinition. +func (c *FakeCustomResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(customresourcedefinitionsResource, name, pt, data, subresources...), &v1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1.CustomResourceDefinition), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied customResourceDefinition. +func (c *FakeCustomResourceDefinitions) Apply(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) { + if customResourceDefinition == nil { + return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") + } + data, err := json.Marshal(customResourceDefinition) + if err != nil { + return nil, err + } + name := customResourceDefinition.Name + if name == nil { + return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(customresourcedefinitionsResource, *name, types.ApplyPatchType, data), &v1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1.CustomResourceDefinition), err +} + +// ApplyStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeCustomResourceDefinitions) ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) { + if customResourceDefinition == nil { + return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") + } + data, err := json.Marshal(customResourceDefinition) + if err != nil { + return nil, err + } + name := customResourceDefinition.Name + if name == nil { + return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(customresourcedefinitionsResource, *name, types.ApplyPatchType, data, "status"), &v1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1.CustomResourceDefinition), err +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/doc.go new file mode 100644 index 0000000000..16f4439906 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_apiextensions_client.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_apiextensions_client.go new file mode 100644 index 0000000000..288683ef97 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_apiextensions_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
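The generated Apply methods take apply configurations rather than full objects. A sketch (illustrative, not vendored code; it assumes the generated constructor in k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1, which takes only a name because CustomResourceDefinition is cluster-scoped, and it seeds the object first since the fake tracker patches existing state):

package fake_test

import (
	"context"
	"testing"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	applyv1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1"
	"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestCRDApply(t *testing.T) {
	// Seed the tracker so the apply patch has an object to land on.
	cs := fake.NewSimpleClientset(&apiextensionsv1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{Name: "widgets.example.com"},
	})
	// The configuration carries only the fields being asserted; the fake
	// turns it into an ApplyPatchType patch action (see Invokes above).
	cfg := applyv1.CustomResourceDefinition("widgets.example.com").
		WithLabels(map[string]string{"team": "platform"})
	crd, err := cs.ApiextensionsV1().CustomResourceDefinitions().
		Apply(context.Background(), cfg, metav1.ApplyOptions{FieldManager: "example-test"})
	if err != nil {
		t.Fatal(err)
	}
	if crd.Name != "widgets.example.com" {
		t.Fatalf("unexpected name %q", crd.Name)
	}
}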
+ +package fake + +import ( + v1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeApiextensionsV1beta1 struct { + *testing.Fake +} + +func (c *FakeApiextensionsV1beta1) CustomResourceDefinitions() v1beta1.CustomResourceDefinitionInterface { + return &FakeCustomResourceDefinitions{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeApiextensionsV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_customresourcedefinition.go new file mode 100644 index 0000000000..250d69a634 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_customresourcedefinition.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCustomResourceDefinitions implements CustomResourceDefinitionInterface +type FakeCustomResourceDefinitions struct { + Fake *FakeApiextensionsV1beta1 +} + +var customresourcedefinitionsResource = v1beta1.SchemeGroupVersion.WithResource("customresourcedefinitions") + +var customresourcedefinitionsKind = v1beta1.SchemeGroupVersion.WithKind("CustomResourceDefinition") + +// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. +func (c *FakeCustomResourceDefinitions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CustomResourceDefinition, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(customresourcedefinitionsResource, name), &v1beta1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CustomResourceDefinition), err +} + +// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. +func (c *FakeCustomResourceDefinitions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CustomResourceDefinitionList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(customresourcedefinitionsResource, customresourcedefinitionsKind, opts), &v1beta1.CustomResourceDefinitionList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.CustomResourceDefinitionList{ListMeta: obj.(*v1beta1.CustomResourceDefinitionList).ListMeta} + for _, item := range obj.(*v1beta1.CustomResourceDefinitionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested customResourceDefinitions. +func (c *FakeCustomResourceDefinitions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(customresourcedefinitionsResource, opts)) +} + +// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *FakeCustomResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.CreateOptions) (result *v1beta1.CustomResourceDefinition, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(customresourcedefinitionsResource, customResourceDefinition), &v1beta1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CustomResourceDefinition), err +} + +// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *FakeCustomResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (result *v1beta1.CustomResourceDefinition, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(customresourcedefinitionsResource, customResourceDefinition), &v1beta1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CustomResourceDefinition), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCustomResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(customresourcedefinitionsResource, "status", customResourceDefinition), &v1beta1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CustomResourceDefinition), err +} + +// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. +func (c *FakeCustomResourceDefinitions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(customresourcedefinitionsResource, name, opts), &v1beta1.CustomResourceDefinition{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeCustomResourceDefinitions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(customresourcedefinitionsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.CustomResourceDefinitionList{}) + return err +} + +// Patch applies the patch and returns the patched customResourceDefinition. +func (c *FakeCustomResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(customresourcedefinitionsResource, name, pt, data, subresources...), &v1beta1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CustomResourceDefinition), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied customResourceDefinition. +func (c *FakeCustomResourceDefinitions) Apply(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) { + if customResourceDefinition == nil { + return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") + } + data, err := json.Marshal(customResourceDefinition) + if err != nil { + return nil, err + } + name := customResourceDefinition.Name + if name == nil { + return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(customresourcedefinitionsResource, *name, types.ApplyPatchType, data), &v1beta1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CustomResourceDefinition), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeCustomResourceDefinitions) ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) { + if customResourceDefinition == nil { + return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") + } + data, err := json.Marshal(customResourceDefinition) + if err != nil { + return nil, err + } + name := customResourceDefinition.Name + if name == nil { + return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(customresourcedefinitionsResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.CustomResourceDefinition{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CustomResourceDefinition), err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index a9f0762273..aa684c9795 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -30,6 +30,9 @@ github.com/Microsoft/hcsshim/osversion # github.com/andybalholm/brotli v1.0.1 ## explicit; go 1.12 github.com/andybalholm/brotli +# github.com/armon/go-metrics v0.4.1 +## explicit; go 1.12 +github.com/armon/go-metrics # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator @@ -55,7 +58,7 @@ github.com/chai2010/gettext-go/po # github.com/cilium/charts v0.0.0-20240131194518-c3dab910c790 ## explicit; go 1.17 github.com/cilium/charts -# github.com/cilium/cilium v1.15.0 +# github.com/cilium/cilium v1.15.1 ## explicit; go 1.21.0 github.com/cilium/cilium/api/v1/client github.com/cilium/cilium/api/v1/client/bgp @@ -77,75 +80,159 @@ github.com/cilium/cilium/api/v1/models github.com/cilium/cilium/api/v1/observer github.com/cilium/cilium/api/v1/relay github.com/cilium/cilium/api/v1/server/restapi/bgp +github.com/cilium/cilium/daemon/k8s github.com/cilium/cilium/pkg/alibabacloud/eni/types +github.com/cilium/cilium/pkg/allocator +github.com/cilium/cilium/pkg/annotation github.com/cilium/cilium/pkg/api github.com/cilium/cilium/pkg/aws/eni/types github.com/cilium/cilium/pkg/azure/types +github.com/cilium/cilium/pkg/backoff +github.com/cilium/cilium/pkg/bgpv1/agent +github.com/cilium/cilium/pkg/bgpv1/agent/signaler github.com/cilium/cilium/pkg/bgpv1/api github.com/cilium/cilium/pkg/bgpv1/types +github.com/cilium/cilium/pkg/bpf +github.com/cilium/cilium/pkg/byteorder github.com/cilium/cilium/pkg/cidr github.com/cilium/cilium/pkg/client github.com/cilium/cilium/pkg/clustermesh/types github.com/cilium/cilium/pkg/command github.com/cilium/cilium/pkg/command/exec +github.com/cilium/cilium/pkg/common github.com/cilium/cilium/pkg/comparator github.com/cilium/cilium/pkg/components +github.com/cilium/cilium/pkg/container +github.com/cilium/cilium/pkg/controller +github.com/cilium/cilium/pkg/counter +github.com/cilium/cilium/pkg/crypto/certificatemanager +github.com/cilium/cilium/pkg/datapath/linux/bandwidth +github.com/cilium/cilium/pkg/datapath/linux/config/defines github.com/cilium/cilium/pkg/datapath/linux/probes +github.com/cilium/cilium/pkg/datapath/loader/metrics +github.com/cilium/cilium/pkg/datapath/tunnel +github.com/cilium/cilium/pkg/datapath/types +github.com/cilium/cilium/pkg/debug github.com/cilium/cilium/pkg/defaults +github.com/cilium/cilium/pkg/ebpf github.com/cilium/cilium/pkg/endpoint/id +github.com/cilium/cilium/pkg/endpoint/regeneration github.com/cilium/cilium/pkg/envoy/resource +github.com/cilium/cilium/pkg/eventqueue github.com/cilium/cilium/pkg/fqdn/dns github.com/cilium/cilium/pkg/fqdn/matchpattern github.com/cilium/cilium/pkg/fqdn/re 
+github.com/cilium/cilium/pkg/fqdn/restore github.com/cilium/cilium/pkg/health/client github.com/cilium/cilium/pkg/health/defaults github.com/cilium/cilium/pkg/hive github.com/cilium/cilium/pkg/hive/cell github.com/cilium/cilium/pkg/hive/internal +github.com/cilium/cilium/pkg/hive/job github.com/cilium/cilium/pkg/hive/metrics github.com/cilium/cilium/pkg/iana github.com/cilium/cilium/pkg/identity +github.com/cilium/cilium/pkg/identity/cache +github.com/cilium/cilium/pkg/identity/identitymanager +github.com/cilium/cilium/pkg/identity/key +github.com/cilium/cilium/pkg/identity/model +github.com/cilium/cilium/pkg/idpool github.com/cilium/cilium/pkg/inctimer github.com/cilium/cilium/pkg/ip github.com/cilium/cilium/pkg/ipam/option github.com/cilium/cilium/pkg/ipam/types +github.com/cilium/cilium/pkg/ipcache/types +github.com/cilium/cilium/pkg/k8s github.com/cilium/cilium/pkg/k8s/apis/cilium.io github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2 github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1 +github.com/cilium/cilium/pkg/k8s/client github.com/cilium/cilium/pkg/k8s/client/clientset/versioned +github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2 +github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1 +github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake +github.com/cilium/cilium/pkg/k8s/constants +github.com/cilium/cilium/pkg/k8s/identitybackend +github.com/cilium/cilium/pkg/k8s/informer +github.com/cilium/cilium/pkg/k8s/metrics +github.com/cilium/cilium/pkg/k8s/resource github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1 +github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1 +github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1 +github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1 +github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme +github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1 +github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-clientset +github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1 github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1 github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation +github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1 github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection +github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr +github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned +github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake +github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme +github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1 
+github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake +github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1 +github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake +github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1 +github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake +github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1 +github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake +github.com/cilium/cilium/pkg/k8s/types github.com/cilium/cilium/pkg/k8s/utils +github.com/cilium/cilium/pkg/k8s/version +github.com/cilium/cilium/pkg/k8s/watchers/resources +github.com/cilium/cilium/pkg/kvstore +github.com/cilium/cilium/pkg/kvstore/allocator +github.com/cilium/cilium/pkg/kvstore/store github.com/cilium/cilium/pkg/labels github.com/cilium/cilium/pkg/loadbalancer github.com/cilium/cilium/pkg/lock github.com/cilium/cilium/pkg/logging github.com/cilium/cilium/pkg/logging/logfields github.com/cilium/cilium/pkg/mac +github.com/cilium/cilium/pkg/maps/bwmap +github.com/cilium/cilium/pkg/maps/lxcmap github.com/cilium/cilium/pkg/metrics github.com/cilium/cilium/pkg/metrics/metric github.com/cilium/cilium/pkg/metrics/metric/collections github.com/cilium/cilium/pkg/monitor/api github.com/cilium/cilium/pkg/monitor/notifications +github.com/cilium/cilium/pkg/mountinfo +github.com/cilium/cilium/pkg/node github.com/cilium/cilium/pkg/node/addressing +github.com/cilium/cilium/pkg/node/types github.com/cilium/cilium/pkg/option +github.com/cilium/cilium/pkg/policy github.com/cilium/cilium/pkg/policy/api +github.com/cilium/cilium/pkg/policy/trafficdirection github.com/cilium/cilium/pkg/promise +github.com/cilium/cilium/pkg/proxy/accesslog +github.com/cilium/cilium/pkg/rand +github.com/cilium/cilium/pkg/rate +github.com/cilium/cilium/pkg/rate/metrics +github.com/cilium/cilium/pkg/safeio github.com/cilium/cilium/pkg/safetime +github.com/cilium/cilium/pkg/service/store github.com/cilium/cilium/pkg/slices github.com/cilium/cilium/pkg/source github.com/cilium/cilium/pkg/spanstat github.com/cilium/cilium/pkg/stream +github.com/cilium/cilium/pkg/sysctl github.com/cilium/cilium/pkg/time +github.com/cilium/cilium/pkg/trigger +github.com/cilium/cilium/pkg/types +github.com/cilium/cilium/pkg/u8proto github.com/cilium/cilium/pkg/version github.com/cilium/cilium/pkg/versioncheck +github.com/cilium/cilium/pkg/wireguard/types # github.com/cilium/ebpf v0.12.3 ## explicit; go 1.20 github.com/cilium/ebpf @@ -164,6 +251,7 @@ github.com/cilium/ebpf/link github.com/cilium/hubble/pkg/printer # github.com/cilium/proxy v0.0.0-20231031145409-f19708f3d018 ## explicit; go 1.20 +github.com/cilium/proxy/go/cilium/api github.com/cilium/proxy/go/envoy/admin/v3 github.com/cilium/proxy/go/envoy/annotations github.com/cilium/proxy/go/envoy/config/accesslog/v3 @@ -470,6 +558,12 @@ 
github.com/containerd/containerd/version # github.com/containerd/log v0.1.0 ## explicit; go 1.20 github.com/containerd/log +# github.com/coreos/go-semver v0.3.1 +## explicit; go 1.8 +github.com/coreos/go-semver/semver +# github.com/coreos/go-systemd/v22 v22.5.0 +## explicit; go 1.12 +github.com/coreos/go-systemd/v22/journal # github.com/cyphar/filepath-securejoin v0.2.4 ## explicit; go 1.13 github.com/cyphar/filepath-securejoin @@ -638,7 +732,9 @@ github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 +github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto +github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit @@ -716,12 +812,30 @@ github.com/gosuri/uitable/util/wordwrap # github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 ## explicit github.com/gregjones/httpcache +# github.com/hashicorp/consul/api v1.26.1 +## explicit; go 1.19 +github.com/hashicorp/consul/api # github.com/hashicorp/errwrap v1.1.0 ## explicit github.com/hashicorp/errwrap +# github.com/hashicorp/go-cleanhttp v0.5.2 +## explicit; go 1.13 +github.com/hashicorp/go-cleanhttp +# github.com/hashicorp/go-hclog v1.5.0 +## explicit; go 1.13 +github.com/hashicorp/go-hclog +# github.com/hashicorp/go-immutable-radix v1.3.1 +## explicit +github.com/hashicorp/go-immutable-radix # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror +# github.com/hashicorp/go-rootcerts v1.0.2 +## explicit; go 1.12 +github.com/hashicorp/go-rootcerts +# github.com/hashicorp/golang-lru v0.5.4 +## explicit; go 1.12 +github.com/hashicorp/golang-lru/simplelru # github.com/hashicorp/golang-lru/v2 v2.0.7 ## explicit; go 1.18 github.com/hashicorp/golang-lru/v2/internal @@ -738,6 +852,9 @@ github.com/hashicorp/hcl/hcl/token github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token +# github.com/hashicorp/serf v0.10.1 +## explicit; go 1.12 +github.com/hashicorp/serf/coordinate # github.com/huandu/xstrings v1.4.0 ## explicit; go 1.12 github.com/huandu/xstrings @@ -819,6 +936,9 @@ github.com/mholt/archiver/v3 # github.com/mitchellh/copystructure v1.2.0 ## explicit; go 1.15 github.com/mitchellh/copystructure +# github.com/mitchellh/go-homedir v1.1.0 +## explicit +github.com/mitchellh/go-homedir # github.com/mitchellh/go-wordwrap v1.0.1 ## explicit; go 1.14 github.com/mitchellh/go-wordwrap @@ -1063,6 +1183,28 @@ github.com/zmap/zlint/v3/lints/etsi github.com/zmap/zlint/v3/lints/mozilla github.com/zmap/zlint/v3/lints/rfc github.com/zmap/zlint/v3/util +# go.etcd.io/etcd/api/v3 v3.5.11 +## explicit; go 1.20 +go.etcd.io/etcd/api/v3/authpb +go.etcd.io/etcd/api/v3/etcdserverpb +go.etcd.io/etcd/api/v3/membershippb +go.etcd.io/etcd/api/v3/mvccpb +go.etcd.io/etcd/api/v3/v3rpc/rpctypes +go.etcd.io/etcd/api/v3/version +# go.etcd.io/etcd/client/pkg/v3 v3.5.11 +## explicit; go 1.20 +go.etcd.io/etcd/client/pkg/v3/logutil 
+go.etcd.io/etcd/client/pkg/v3/systemd +go.etcd.io/etcd/client/pkg/v3/tlsutil +go.etcd.io/etcd/client/pkg/v3/types +# go.etcd.io/etcd/client/v3 v3.5.11 +## explicit; go 1.20 +go.etcd.io/etcd/client/v3 +go.etcd.io/etcd/client/v3/concurrency +go.etcd.io/etcd/client/v3/credentials +go.etcd.io/etcd/client/v3/internal/endpoint +go.etcd.io/etcd/client/v3/internal/resolver +go.etcd.io/etcd/client/v3/yaml # go.mongodb.org/mongo-driver v1.13.1 ## explicit; go 1.13 go.mongodb.org/mongo-driver/bson @@ -1120,6 +1262,18 @@ go.uber.org/dig/internal/graph # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr +# go.uber.org/zap v1.26.0 +## explicit; go 1.19 +go.uber.org/zap +go.uber.org/zap/buffer +go.uber.org/zap/internal +go.uber.org/zap/internal/bufferpool +go.uber.org/zap/internal/color +go.uber.org/zap/internal/exit +go.uber.org/zap/internal/pool +go.uber.org/zap/internal/stacktrace +go.uber.org/zap/zapcore +go.uber.org/zap/zapgrpc # go4.org/netipx v0.0.0-20231129151722-fdeea329fbba ## explicit; go 1.18 go4.org/netipx @@ -1219,7 +1373,7 @@ google.golang.org/genproto/googleapis/api/expr/v1alpha1 # google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.61.0 +# google.golang.org/grpc v1.61.1 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1268,6 +1422,7 @@ google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/resolver google.golang.org/grpc/resolver/dns +google.golang.org/grpc/resolver/manual google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status @@ -1432,9 +1587,12 @@ k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake # k8s.io/apimachinery v0.29.0 ## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality @@ -1774,6 +1932,9 @@ oras.land/oras-go/pkg/registry/remote/auth oras.land/oras-go/pkg/registry/remote/internal/errutil oras.land/oras-go/pkg/registry/remote/internal/syncutil oras.land/oras-go/pkg/target +# sigs.k8s.io/controller-runtime v0.16.3 +## explicit; go 1.20 +sigs.k8s.io/controller-runtime/pkg/client/apiutil # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd ## explicit; go 1.18 sigs.k8s.io/json diff --git a/vendor/sigs.k8s.io/controller-runtime/LICENSE b/vendor/sigs.k8s.io/controller-runtime/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go
new file mode 100644
index 0000000000..6a1bfb546e
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go
@@ -0,0 +1,246 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package apiutil contains utilities for working with raw Kubernetes
+// API machinery, such as creating RESTMappers and raw REST clients,
+// and extracting the GVK of an object.
+package apiutil
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"reflect"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/dynamic"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
+)
+
+var (
+	protobufScheme     = runtime.NewScheme()
+	protobufSchemeLock sync.RWMutex
+)
+
+func init() {
+	// Currently only enabled for built-in resources, which are guaranteed to implement Protocol Buffers.
+	// For custom resources, CRDs cannot support Protocol Buffers, but aggregated APIs can.
+	// See doc: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility
+	if err := clientgoscheme.AddToScheme(protobufScheme); err != nil {
+		panic(err)
+	}
+}
+
+// AddToProtobufScheme adds the given SchemeBuilder into protobufScheme, which should
+// contain only additional types that do support protobuf.
+func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error {
+	protobufSchemeLock.Lock()
+	defer protobufSchemeLock.Unlock()
+	return addToScheme(protobufScheme)
+}
+
+// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery
+// information fetched by a new client with the given config.
+func NewDiscoveryRESTMapper(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) {
+	if httpClient == nil {
+		return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client")
+	}
+
+	// Get a mapper
+	dc, err := discovery.NewDiscoveryClientForConfigAndClient(c, httpClient)
+	if err != nil {
+		return nil, err
+	}
+	gr, err := restmapper.GetAPIGroupResources(dc)
+	if err != nil {
+		return nil, err
+	}
+	return restmapper.NewDiscoveryRESTMapper(gr), nil
+}
+
+// IsObjectNamespaced returns true if the object is namespace scoped.
+// For unstructured objects the gvk is found from the object itself.
+func IsObjectNamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper meta.RESTMapper) (bool, error) {
+	gvk, err := GVKForObject(obj, scheme)
+	if err != nil {
+		return false, err
+	}
+
+	return IsGVKNamespaced(gvk, restmapper)
+}
+
+// IsGVKNamespaced returns true if the object having the provided
+// GVK is namespace scoped.
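+//
+// For example (an illustrative sketch; the GVK literal is arbitrary and
+// restmapper is assumed to come from a constructor such as
+// NewDynamicRESTMapper in restmapper.go):
+//
+//	namespaced, err := IsGVKNamespaced(
+//		schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
+//		restmapper,
+//	)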
+func IsGVKNamespaced(gvk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) {
+	restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind})
+	if err != nil {
+		return false, fmt.Errorf("failed to get restmapping: %w", err)
+	}
+
+	scope := restmapping.Scope.Name()
+	if scope == "" {
+		return false, errors.New("scope cannot be identified, empty scope returned")
+	}
+
+	if scope != meta.RESTScopeNameRoot {
+		return true, nil
+	}
+	return false, nil
+}
+
+// GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK.
+func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) {
+	// TODO(directxman12): do we want to generalize this to arbitrary container types?
+	// I think we'd need a generalized form of scheme or something. It's a
+	// shame there's not a reliable "GetGVK" interface that works by default
+	// for unpopulated static types and populated "dynamic" types
+	// (unstructured, partial, etc)
+
+	// check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds
+	_, isPartial := obj.(*metav1.PartialObjectMetadata)
+	_, isPartialList := obj.(*metav1.PartialObjectMetadataList)
+	if isPartial || isPartialList {
+		// we require that the GVK be populated in order to recognize the object
+		gvk := obj.GetObjectKind().GroupVersionKind()
+		if len(gvk.Kind) == 0 {
+			return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind")
+		}
+		if len(gvk.Version) == 0 {
+			return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version")
+		}
+		return gvk, nil
+	}
+
+	// Use the given scheme to retrieve all the GVKs for the object.
+	gvks, isUnversioned, err := scheme.ObjectKinds(obj)
+	if err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	if isUnversioned {
+		return schema.GroupVersionKind{}, fmt.Errorf("cannot create group-version-kind for unversioned type %T", obj)
+	}
+
+	switch {
+	case len(gvks) < 1:
+		// If the object has no GVK, the object might not have been registered with the scheme,
+		// or it's not a valid object.
+		return schema.GroupVersionKind{}, fmt.Errorf("no GroupVersionKind associated with Go type %T, was the type registered with the Scheme?", obj)
+	case len(gvks) > 1:
+		err := fmt.Errorf("multiple GroupVersionKinds associated with Go type %T within the Scheme, this can happen when a type is registered for multiple GVKs at the same time", obj)
+
+		// We've found multiple GVKs for the object.
+		currentGVK := obj.GetObjectKind().GroupVersionKind()
+		if !currentGVK.Empty() {
+			// If the base object has a GVK, check if it's in the list of GVKs before using it.
+			for _, gvk := range gvks {
+				if gvk == currentGVK {
+					return gvk, nil
+				}
+			}
+
+			return schema.GroupVersionKind{}, fmt.Errorf(
+				"%w: the object's supplied GroupVersionKind %q was not found in the Scheme's list; refusing to guess at one: %q", err, currentGVK, gvks)
+		}
+
+		// This should only trigger for things like metav1.XYZ --
+		// normal versioned types should be fine.
+		//
+		// See https://github.com/kubernetes-sigs/controller-runtime/issues/362
+		// for more information.
+ return schema.GroupVersionKind{}, fmt.Errorf( + "%w: callers can either fix their type registration to only register it once, or specify the GroupVersionKind to use for object passed in; refusing to guess at one: %q", err, gvks) + default: + // In any other case, we've found a single GVK for the object. + return gvks[0], nil + } +} + +// RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated +// with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from +// baseConfig, if set, otherwise a default serializer will be set. +func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory, httpClient *http.Client) (rest.Interface, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + return rest.RESTClientForConfigAndClient(createRestConfig(gvk, isUnstructured, baseConfig, codecs), httpClient) +} + +// createRestConfig copies the base config and updates needed fields for a new rest config. +func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config { + gv := gvk.GroupVersion() + + cfg := rest.CopyConfig(baseConfig) + cfg.GroupVersion = &gv + if gvk.Group == "" { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + if cfg.UserAgent == "" { + cfg.UserAgent = rest.DefaultKubernetesUserAgent() + } + // TODO(FillZpp): In the long run, we want to check discovery or something to make sure that this is actually true. + if cfg.ContentType == "" && !isUnstructured { + protobufSchemeLock.RLock() + if protobufScheme.Recognizes(gvk) { + cfg.ContentType = runtime.ContentTypeProtobuf + } + protobufSchemeLock.RUnlock() + } + + if isUnstructured { + // If the object is unstructured, we use the client-go dynamic serializer. + cfg = dynamic.ConfigFor(cfg) + } else { + cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + } + + return cfg +} + +type serializerWithTargetZeroingDecode struct { + runtime.NegotiatedSerializer +} + +func (s serializerWithTargetZeroingDecode) DecoderToVersion(serializer runtime.Decoder, r runtime.GroupVersioner) runtime.Decoder { + return targetZeroingDecoder{upstream: s.NegotiatedSerializer.DecoderToVersion(serializer, r)} +} + +type targetZeroingDecoder struct { + upstream runtime.Decoder +} + +func (t targetZeroingDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + zero(into) + return t.upstream.Decode(data, defaults, into) +} + +// zero zeros the value of a pointer. +func zero(x interface{}) { + if x == nil { + return + } + res := reflect.ValueOf(x).Elem() + res.Set(reflect.Zero(res.Type())) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/errors.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/errors.go new file mode 100644 index 0000000000..c216c49d2a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/errors.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "fmt" + "sort" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ErrResourceDiscoveryFailed is returned if the RESTMapper cannot discover supported resources for some GroupVersions. +// It wraps the errors encountered, except "NotFound" errors are replaced with meta.NoResourceMatchError, for +// backwards compatibility with code that uses meta.IsNoMatchError() to check for unsupported APIs. +type ErrResourceDiscoveryFailed map[schema.GroupVersion]error + +// Error implements the error interface. +func (e *ErrResourceDiscoveryFailed) Error() string { + subErrors := []string{} + for k, v := range *e { + subErrors = append(subErrors, fmt.Sprintf("%s: %v", k, v)) + } + sort.Strings(subErrors) + return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(subErrors, ", ")) +} + +func (e *ErrResourceDiscoveryFailed) Unwrap() []error { + subErrors := []error{} + for gv, err := range *e { + if apierrors.IsNotFound(err) { + err = &meta.NoResourceMatchError{PartialResource: gv.WithResource("")} + } + subErrors = append(subErrors, err) + } + return subErrors +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go new file mode 100644 index 0000000000..d5e03b2b19 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go @@ -0,0 +1,294 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "fmt" + "net/http" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. 
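+//
+// For example (an illustrative sketch; cfg is an existing *rest.Config, and
+// rest.HTTPClientFor is the client-go helper that derives an *http.Client
+// from the same config):
+//
+//	httpClient, err := rest.HTTPClientFor(cfg) // reuse one HTTP client for discovery
+//	if err != nil {
+//		return nil, err
+//	}
+//	mapper, err := NewDynamicRESTMapper(cfg, httpClient)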
+func NewDynamicRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + + client, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, httpClient) + if err != nil { + return nil, err + } + return &mapper{ + mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), + client: client, + knownGroups: map[string]*restmapper.APIGroupResources{}, + apiGroups: map[string]*metav1.APIGroup{}, + }, nil +} + +// mapper is a RESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. +type mapper struct { + mapper meta.RESTMapper + client *discovery.DiscoveryClient + knownGroups map[string]*restmapper.APIGroupResources + apiGroups map[string]*metav1.APIGroup + + // mutex to provide thread-safe mapper reloading. + mu sync.RWMutex +} + +// KindFor implements Mapper.KindFor. +func (m *mapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + res, err := m.getMapper().KindFor(resource) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return schema.GroupVersionKind{}, err + } + res, err = m.getMapper().KindFor(resource) + } + + return res, err +} + +// KindsFor implements Mapper.KindsFor. +func (m *mapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + res, err := m.getMapper().KindsFor(resource) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return nil, err + } + res, err = m.getMapper().KindsFor(resource) + } + + return res, err +} + +// ResourceFor implements Mapper.ResourceFor. +func (m *mapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourceFor(input) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return schema.GroupVersionResource{}, err + } + res, err = m.getMapper().ResourceFor(input) + } + + return res, err +} + +// ResourcesFor implements Mapper.ResourcesFor. +func (m *mapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourcesFor(input) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return nil, err + } + res, err = m.getMapper().ResourcesFor(input) + } + + return res, err +} + +// RESTMapping implements Mapper.RESTMapping. +func (m *mapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMapping(gk, versions...) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err + } + res, err = m.getMapper().RESTMapping(gk, versions...) + } + + return res, err +} + +// RESTMappings implements Mapper.RESTMappings. +func (m *mapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMappings(gk, versions...) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err + } + res, err = m.getMapper().RESTMappings(gk, versions...) + } + + return res, err +} + +// ResourceSingularizer implements Mapper.ResourceSingularizer. 
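+// Unlike the lookup methods above, a miss here does not trigger
+// addKnownGroupAndReload; the answer comes from the mapper's current state only.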
+func (m *mapper) ResourceSingularizer(resource string) (string, error) {
+	return m.getMapper().ResourceSingularizer(resource)
+}
+
+func (m *mapper) getMapper() meta.RESTMapper {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	return m.mapper
+}
+
+// addKnownGroupAndReload reloads the mapper with updated information about the missing API group.
+// versions can be specified for partial updates, for instance for the v1beta1 version only.
+func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) error {
+	// versions will be [""] here if the forwarded Version value of the
+	// GroupVersionResource (in the calling method) was not specified.
+	if len(versions) == 1 && versions[0] == "" {
+		versions = nil
+	}
+
+	// If no specific versions are set by the user, we will scan all available ones for the API group.
+	// This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls
+	// this data will be taken from cache.
+	if len(versions) == 0 {
+		apiGroup, err := m.findAPIGroupByName(groupName)
+		if err != nil {
+			return err
+		}
+		for _, version := range apiGroup.Versions {
+			versions = append(versions, version.Version)
+		}
+	}
+
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	// Create or fetch group resources from cache.
+	groupResources := &restmapper.APIGroupResources{
+		Group:              metav1.APIGroup{Name: groupName},
+		VersionedResources: make(map[string][]metav1.APIResource),
+	}
+	if _, ok := m.knownGroups[groupName]; ok {
+		groupResources = m.knownGroups[groupName]
+	}
+
+	// Update information for group resources about versioned resources.
+	// The number of API calls is equal to the number of versions: /apis/<group>/<version>.
+	groupVersionResources, err := m.fetchGroupVersionResources(groupName, versions...)
+	if err != nil {
+		return fmt.Errorf("failed to get API group resources: %w", err)
+	}
+	for version, resources := range groupVersionResources {
+		groupResources.VersionedResources[version.Version] = resources.APIResources
+	}
+
+	// Update information for group resources about the API group by adding new versions.
+	// Ignore the versions that are already registered.
+	for _, version := range versions {
+		found := false
+		for _, v := range groupResources.Group.Versions {
+			if v.Version == version {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			groupResources.Group.Versions = append(groupResources.Group.Versions, metav1.GroupVersionForDiscovery{
+				GroupVersion: metav1.GroupVersion{Group: groupName, Version: version}.String(),
+				Version:      version,
+			})
+		}
+	}
+
+	// Update data in the cache.
+	m.knownGroups[groupName] = groupResources
+
+	// Finally, update the group with received information and regenerate the mapper.
+	updatedGroupResources := make([]*restmapper.APIGroupResources, 0, len(m.knownGroups))
+	for _, agr := range m.knownGroups {
+		updatedGroupResources = append(updatedGroupResources, agr)
+	}
+
+	m.mapper = restmapper.NewDiscoveryRESTMapper(updatedGroupResources)
+	return nil
+}
+
+// findAPIGroupByName returns the API group by its name.
+func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) {
+	// Looking in the cache first.
+	{
+		m.mu.RLock()
+		group, ok := m.apiGroups[groupName]
+		m.mu.RUnlock()
+		if ok {
+			return group, nil
+		}
+	}
+
+	// Update the cache if nothing was found.
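+	// Slow path of a double-checked cache: the read-locked lookup above is
+	// cheap, and only a cache miss pays for the discovery round trip below.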
+ apiGroups, err := m.client.ServerGroups() + if err != nil { + return nil, fmt.Errorf("failed to get server groups: %w", err) + } + if len(apiGroups.Groups) == 0 { + return nil, fmt.Errorf("received an empty API groups list") + } + + m.mu.Lock() + for i := range apiGroups.Groups { + group := &apiGroups.Groups[i] + m.apiGroups[group.Name] = group + } + m.mu.Unlock() + + // Looking in the cache again. + { + m.mu.RLock() + group, ok := m.apiGroups[groupName] + m.mu.RUnlock() + if ok { + return group, nil + } + } + + // If there is still nothing, return an error. + return nil, fmt.Errorf("failed to find API group %q", groupName) +} + +// fetchGroupVersionResources fetches the resources for the specified group and its versions. +func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { + groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) + failedGroups := make(map[schema.GroupVersion]error) + + for _, version := range versions { + groupVersion := schema.GroupVersion{Group: groupName, Version: version} + + apiResourceList, err := m.client.ServerResourcesForGroupVersion(groupVersion.String()) + if err != nil { + failedGroups[groupVersion] = err + } + if apiResourceList != nil { + // even in case of error, some fallback might have been returned. + groupVersionResources[groupVersion] = apiResourceList + } + } + + if len(failedGroups) > 0 { + err := ErrResourceDiscoveryFailed(failedGroups) + return nil, &err + } + + return groupVersionResources, nil +}
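A closing usage note for reviewers: the sketch below wires the newly vendored lazy RESTMapper together end to end. It is illustrative only and not part of the patch; the in-cluster config and the apps/v1 Deployment GroupKind are assumptions standing in for whatever the caller already has.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: running in-cluster
	if err != nil {
		panic(err)
	}

	// The mapper and its discovery client share one HTTP client.
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		panic(err)
	}

	mapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient)
	if err != nil {
		panic(err)
	}

	// The first lookup for a group misses the initially empty mapper and
	// triggers addKnownGroupAndReload; repeat lookups for the same group
	// are served from knownGroups without further discovery traffic.
	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}, "v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(mapping.Resource) // e.g. apps/v1, Resource=deployments
}

The two-argument constructor reflects a controller-runtime design choice visible in this diff: discovery is performed lazily per group, so importers pay for API server round trips only for the kinds they actually resolve.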